code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
    """
    Sorts this RDD, which is assumed to consist of (key, value) pairs.

    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortByKey().first()
    ('1', 3)
    >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
    >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
    >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
    [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    memory = self._memory_limit()
    serializer = self._jrdd_deserializer

    def sortPartition(iterator):
        # Sort one partition with ExternalSorter (presumably spills to disk
        # when over budget); the 0.9 factor leaves headroom below the
        # configured memory limit -- TODO confirm the intended margin.
        sort = ExternalSorter(memory * 0.9, serializer).sorted
        return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))

    if numPartitions == 1:
        # Single output partition: collapse the RDD and sort it locally,
        # no range partitioning needed.
        if self.getNumPartitions() > 1:
            self = self.coalesce(1)
        return self.mapPartitions(sortPartition, True)

    # first compute the boundary of each part via sampling: we want to partition
    # the key-space into bins such that the bins have roughly the same
    # number of (key, value) pairs falling into them
    rddSize = self.count()
    if not rddSize:
        return self  # empty RDD
    maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
    fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
    samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
    samples = sorted(samples, key=keyfunc)

    # we have numPartitions many parts but one of the them has
    # an implicit boundary
    bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
              for i in range(0, numPartitions - 1)]

    def rangePartitioner(k):
        # bisect_left places keys below bounds[0] in partition 0, and so on;
        # a descending sort simply mirrors the partition index.
        p = bisect.bisect_left(bounds, keyfunc(k))
        if ascending:
            return p
        else:
            return numPartitions - 1 - p

    return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True) | def function[sortByKey, parameter[self, ascending, numPartitions, keyfunc]]:
constant[
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
]
if compare[name[numPartitions] is constant[None]] begin[:]
variable[numPartitions] assign[=] call[name[self]._defaultReducePartitions, parameter[]]
variable[memory] assign[=] call[name[self]._memory_limit, parameter[]]
variable[serializer] assign[=] name[self]._jrdd_deserializer
def function[sortPartition, parameter[iterator]]:
variable[sort] assign[=] call[name[ExternalSorter], parameter[binary_operation[name[memory] * constant[0.9]], name[serializer]]].sorted
return[call[name[iter], parameter[call[name[sort], parameter[name[iterator]]]]]]
if compare[name[numPartitions] equal[==] constant[1]] begin[:]
if compare[call[name[self].getNumPartitions, parameter[]] greater[>] constant[1]] begin[:]
variable[self] assign[=] call[name[self].coalesce, parameter[constant[1]]]
return[call[name[self].mapPartitions, parameter[name[sortPartition], constant[True]]]]
variable[rddSize] assign[=] call[name[self].count, parameter[]]
if <ast.UnaryOp object at 0x7da20c6aafb0> begin[:]
return[name[self]]
variable[maxSampleSize] assign[=] binary_operation[name[numPartitions] * constant[20.0]]
variable[fraction] assign[=] call[name[min], parameter[binary_operation[name[maxSampleSize] / call[name[max], parameter[name[rddSize], constant[1]]]], constant[1.0]]]
variable[samples] assign[=] call[call[call[name[self].sample, parameter[constant[False], name[fraction], constant[1]]].map, parameter[<ast.Lambda object at 0x7da20c6a9840>]].collect, parameter[]]
variable[samples] assign[=] call[name[sorted], parameter[name[samples]]]
variable[bounds] assign[=] <ast.ListComp object at 0x7da20e962d70>
def function[rangePartitioner, parameter[k]]:
variable[p] assign[=] call[name[bisect].bisect_left, parameter[name[bounds], call[name[keyfunc], parameter[name[k]]]]]
if name[ascending] begin[:]
return[name[p]]
return[call[call[name[self].partitionBy, parameter[name[numPartitions], name[rangePartitioner]]].mapPartitions, parameter[name[sortPartition], constant[True]]]] | keyword[def] identifier[sortByKey] ( identifier[self] , identifier[ascending] = keyword[True] , identifier[numPartitions] = keyword[None] , identifier[keyfunc] = keyword[lambda] identifier[x] : identifier[x] ):
literal[string]
keyword[if] identifier[numPartitions] keyword[is] keyword[None] :
identifier[numPartitions] = identifier[self] . identifier[_defaultReducePartitions] ()
identifier[memory] = identifier[self] . identifier[_memory_limit] ()
identifier[serializer] = identifier[self] . identifier[_jrdd_deserializer]
keyword[def] identifier[sortPartition] ( identifier[iterator] ):
identifier[sort] = identifier[ExternalSorter] ( identifier[memory] * literal[int] , identifier[serializer] ). identifier[sorted]
keyword[return] identifier[iter] ( identifier[sort] ( identifier[iterator] , identifier[key] = keyword[lambda] identifier[kv] : identifier[keyfunc] ( identifier[kv] [ literal[int] ]), identifier[reverse] =( keyword[not] identifier[ascending] )))
keyword[if] identifier[numPartitions] == literal[int] :
keyword[if] identifier[self] . identifier[getNumPartitions] ()> literal[int] :
identifier[self] = identifier[self] . identifier[coalesce] ( literal[int] )
keyword[return] identifier[self] . identifier[mapPartitions] ( identifier[sortPartition] , keyword[True] )
identifier[rddSize] = identifier[self] . identifier[count] ()
keyword[if] keyword[not] identifier[rddSize] :
keyword[return] identifier[self]
identifier[maxSampleSize] = identifier[numPartitions] * literal[int]
identifier[fraction] = identifier[min] ( identifier[maxSampleSize] / identifier[max] ( identifier[rddSize] , literal[int] ), literal[int] )
identifier[samples] = identifier[self] . identifier[sample] ( keyword[False] , identifier[fraction] , literal[int] ). identifier[map] ( keyword[lambda] identifier[kv] : identifier[kv] [ literal[int] ]). identifier[collect] ()
identifier[samples] = identifier[sorted] ( identifier[samples] , identifier[key] = identifier[keyfunc] )
identifier[bounds] =[ identifier[samples] [ identifier[int] ( identifier[len] ( identifier[samples] )*( identifier[i] + literal[int] )/ identifier[numPartitions] )]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[numPartitions] - literal[int] )]
keyword[def] identifier[rangePartitioner] ( identifier[k] ):
identifier[p] = identifier[bisect] . identifier[bisect_left] ( identifier[bounds] , identifier[keyfunc] ( identifier[k] ))
keyword[if] identifier[ascending] :
keyword[return] identifier[p]
keyword[else] :
keyword[return] identifier[numPartitions] - literal[int] - identifier[p]
keyword[return] identifier[self] . identifier[partitionBy] ( identifier[numPartitions] , identifier[rangePartitioner] ). identifier[mapPartitions] ( identifier[sortPartition] , keyword[True] ) | def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions() # depends on [control=['if'], data=['numPartitions']]
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=not ascending))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1) # depends on [control=['if'], data=[]]
return self.mapPartitions(sortPartition, True) # depends on [control=['if'], data=[]]
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD # depends on [control=['if'], data=[]]
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)] for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p # depends on [control=['if'], data=[]]
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True) |
def invite_by_email(self, email, user, organization, **kwargs):
    # type: (Text, AbstractUser, AbstractBaseOrganization) -> OrganizationInvitationBase
    """
    Primary interface method by which one user invites another to join.

    Args:
        email: address of the person being invited
        user: the already authenticated user issuing the invitation
        organization: the organization the invitee should join
        **kwargs: accepted for interface compatibility; currently unused

    Returns:
        an invitation instance

    Raises:
        MultipleObjectsReturned if multiple matching users are found
    """
    user_model = self.user_model
    try:
        matched_user = user_model.objects.get(email__iexact=email)
    except user_model.DoesNotExist:
        # No registered account yet; the invitation is keyed on the
        # lower-cased email instead.
        matched_user = None
    # TODO allow sending just the OrganizationUser instance
    invitation = self.invitation_model.objects.create(
        invitee=matched_user,
        invitee_identifier=email.lower(),
        invited_by=user,
        organization=organization,
    )
    self.send_invitation(invitation)
    return invitation
constant[
Primary interface method by which one user invites another to join
Args:
email:
request:
**kwargs:
Returns:
an invitation instance
Raises:
MultipleObjectsReturned if multiple matching users are found
]
<ast.Try object at 0x7da1b0677c10>
variable[user_invitation] assign[=] call[name[self].invitation_model.objects.create, parameter[]]
call[name[self].send_invitation, parameter[name[user_invitation]]]
return[name[user_invitation]] | keyword[def] identifier[invite_by_email] ( identifier[self] , identifier[email] , identifier[user] , identifier[organization] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[invitee] = identifier[self] . identifier[user_model] . identifier[objects] . identifier[get] ( identifier[email__iexact] = identifier[email] )
keyword[except] identifier[self] . identifier[user_model] . identifier[DoesNotExist] :
identifier[invitee] = keyword[None]
identifier[user_invitation] = identifier[self] . identifier[invitation_model] . identifier[objects] . identifier[create] (
identifier[invitee] = identifier[invitee] ,
identifier[invitee_identifier] = identifier[email] . identifier[lower] (),
identifier[invited_by] = identifier[user] ,
identifier[organization] = identifier[organization] ,
)
identifier[self] . identifier[send_invitation] ( identifier[user_invitation] )
keyword[return] identifier[user_invitation] | def invite_by_email(self, email, user, organization, **kwargs):
# type: (Text, AbstractUser, AbstractBaseOrganization) -> OrganizationInvitationBase
'\n Primary interface method by which one user invites another to join\n\n Args:\n email:\n request:\n **kwargs:\n\n Returns:\n an invitation instance\n\n Raises:\n MultipleObjectsReturned if multiple matching users are found\n\n '
try:
invitee = self.user_model.objects.get(email__iexact=email) # depends on [control=['try'], data=[]]
except self.user_model.DoesNotExist:
invitee = None # depends on [control=['except'], data=[]]
# TODO allow sending just the OrganizationUser instance
user_invitation = self.invitation_model.objects.create(invitee=invitee, invitee_identifier=email.lower(), invited_by=user, organization=organization)
self.send_invitation(user_invitation)
return user_invitation |
def populate_metadata(model, MetadataClass):
    """Ensure there is a metadata record for every instance of *model*.

    Iterates all rows of *model* and calls ``create_metadata_instance`` for
    each one with the given metadata class.

    Args:
        model: Django model class whose instances need metadata.
        MetadataClass: metadata model to instantiate per row.
    """
    # NOTE(review): the original also fetched ContentType.objects.get_for_model(model)
    # into a local that was never used; that dead DB lookup has been removed.
    for instance in model.objects.all():
        create_metadata_instance(MetadataClass, instance)
def combine_meta_data(files_dict, meta_data_v2=True):
    """
    Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array.

    Parameters
    ----------
    files_dict : dict
        Maps HDF5 file names to their settings; only the keys (file names)
        are used here, and every file must provide a ``/meta_data`` table.
    meta_data_v2 : bool
        True for new (v2) meta data format, False for the old (v1) format.

    Returns
    -------
    numpy.ndarray
        Record array holding the concatenated meta data of all files.
    """
    # NOTE(review): dict.iterkeys() means this function targets Python 2.
    if len(files_dict) > 10:
        logging.info("Combine the meta data from %d files", len(files_dict))
    # determine total length needed for the new combined array, thats the fastest way to combine arrays
    total_length = 0  # the total length of the new table
    for file_name in files_dict.iterkeys():
        with tb.open_file(file_name, mode="r") as in_file_h5:  # open the actual file
            total_length += in_file_h5.root.meta_data.shape[0]
    # Pre-allocate the combined array once, using the dtype of the requested
    # format version (v2 carries start/stop timestamps, v1 a single one).
    if meta_data_v2:
        meta_data_combined = np.empty((total_length, ), dtype=[
            ('index_start', np.uint32),
            ('index_stop', np.uint32),
            ('data_length', np.uint32),
            ('timestamp_start', np.float64),
            ('timestamp_stop', np.float64),
            ('error', np.uint32)])
    else:
        meta_data_combined = np.empty((total_length, ), dtype=[
            ('start_index', np.uint32),
            ('stop_index', np.uint32),
            ('length', np.uint32),
            ('timestamp', np.float64),
            ('error', np.uint32)])
    # Show a progress bar only for larger merges, to avoid noise.
    if len(files_dict) > 10:
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_length, term_width=80)
        progress_bar.start()
    index = 0
    # fill actual result array
    for file_name in files_dict.iterkeys():
        with tb.open_file(file_name, mode="r") as in_file_h5:  # open the actual file
            array_length = in_file_h5.root.meta_data.shape[0]
            meta_data_combined[index:index + array_length] = in_file_h5.root.meta_data[:]
            index += array_length
            if len(files_dict) > 10:
                progress_bar.update(index)
    if len(files_dict) > 10:
        progress_bar.finish()
    return meta_data_combined | def function[combine_meta_data, parameter[files_dict, meta_data_v2]]:
constant[
Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array.
Parameters
----------
meta_data_v2 : bool
True for new (v2) meta data format, False for the old (v1) format.
]
if compare[call[name[len], parameter[name[files_dict]]] greater[>] constant[10]] begin[:]
call[name[logging].info, parameter[constant[Combine the meta data from %d files], call[name[len], parameter[name[files_dict]]]]]
variable[total_length] assign[=] constant[0]
for taget[name[file_name]] in starred[call[name[files_dict].iterkeys, parameter[]]] begin[:]
with call[name[tb].open_file, parameter[name[file_name]]] begin[:]
<ast.AugAssign object at 0x7da1b1110e20>
if name[meta_data_v2] begin[:]
variable[meta_data_combined] assign[=] call[name[np].empty, parameter[tuple[[<ast.Name object at 0x7da1b11113f0>]]]]
if compare[call[name[len], parameter[name[files_dict]]] greater[>] constant[10]] begin[:]
variable[progress_bar] assign[=] call[name[progressbar].ProgressBar, parameter[]]
call[name[progress_bar].start, parameter[]]
variable[index] assign[=] constant[0]
for taget[name[file_name]] in starred[call[name[files_dict].iterkeys, parameter[]]] begin[:]
with call[name[tb].open_file, parameter[name[file_name]]] begin[:]
variable[array_length] assign[=] call[name[in_file_h5].root.meta_data.shape][constant[0]]
call[name[meta_data_combined]][<ast.Slice object at 0x7da1b1110220>] assign[=] call[name[in_file_h5].root.meta_data][<ast.Slice object at 0x7da1b1018d30>]
<ast.AugAssign object at 0x7da1b101ac50>
if compare[call[name[len], parameter[name[files_dict]]] greater[>] constant[10]] begin[:]
call[name[progress_bar].update, parameter[name[index]]]
if compare[call[name[len], parameter[name[files_dict]]] greater[>] constant[10]] begin[:]
call[name[progress_bar].finish, parameter[]]
return[name[meta_data_combined]] | keyword[def] identifier[combine_meta_data] ( identifier[files_dict] , identifier[meta_data_v2] = keyword[True] ):
literal[string]
keyword[if] identifier[len] ( identifier[files_dict] )> literal[int] :
identifier[logging] . identifier[info] ( literal[string] , identifier[len] ( identifier[files_dict] ))
identifier[total_length] = literal[int]
keyword[for] identifier[file_name] keyword[in] identifier[files_dict] . identifier[iterkeys] ():
keyword[with] identifier[tb] . identifier[open_file] ( identifier[file_name] , identifier[mode] = literal[string] ) keyword[as] identifier[in_file_h5] :
identifier[total_length] += identifier[in_file_h5] . identifier[root] . identifier[meta_data] . identifier[shape] [ literal[int] ]
keyword[if] identifier[meta_data_v2] :
identifier[meta_data_combined] = identifier[np] . identifier[empty] (( identifier[total_length] ,), identifier[dtype] =[
( literal[string] , identifier[np] . identifier[uint32] ),
( literal[string] , identifier[np] . identifier[uint32] ),
( literal[string] , identifier[np] . identifier[uint32] ),
( literal[string] , identifier[np] . identifier[float64] ),
( literal[string] , identifier[np] . identifier[float64] ),
( literal[string] , identifier[np] . identifier[uint32] )])
keyword[else] :
identifier[meta_data_combined] = identifier[np] . identifier[empty] (( identifier[total_length] ,), identifier[dtype] =[
( literal[string] , identifier[np] . identifier[uint32] ),
( literal[string] , identifier[np] . identifier[uint32] ),
( literal[string] , identifier[np] . identifier[uint32] ),
( literal[string] , identifier[np] . identifier[float64] ),
( literal[string] , identifier[np] . identifier[uint32] )])
keyword[if] identifier[len] ( identifier[files_dict] )> literal[int] :
identifier[progress_bar] = identifier[progressbar] . identifier[ProgressBar] ( identifier[widgets] =[ literal[string] , identifier[progressbar] . identifier[Percentage] (), literal[string] , identifier[progressbar] . identifier[Bar] ( identifier[marker] = literal[string] , identifier[left] = literal[string] , identifier[right] = literal[string] ), literal[string] , identifier[progressbar] . identifier[AdaptiveETA] ()], identifier[maxval] = identifier[total_length] , identifier[term_width] = literal[int] )
identifier[progress_bar] . identifier[start] ()
identifier[index] = literal[int]
keyword[for] identifier[file_name] keyword[in] identifier[files_dict] . identifier[iterkeys] ():
keyword[with] identifier[tb] . identifier[open_file] ( identifier[file_name] , identifier[mode] = literal[string] ) keyword[as] identifier[in_file_h5] :
identifier[array_length] = identifier[in_file_h5] . identifier[root] . identifier[meta_data] . identifier[shape] [ literal[int] ]
identifier[meta_data_combined] [ identifier[index] : identifier[index] + identifier[array_length] ]= identifier[in_file_h5] . identifier[root] . identifier[meta_data] [:]
identifier[index] += identifier[array_length]
keyword[if] identifier[len] ( identifier[files_dict] )> literal[int] :
identifier[progress_bar] . identifier[update] ( identifier[index] )
keyword[if] identifier[len] ( identifier[files_dict] )> literal[int] :
identifier[progress_bar] . identifier[finish] ()
keyword[return] identifier[meta_data_combined] | def combine_meta_data(files_dict, meta_data_v2=True):
"""
Takes the dict of hdf5 files and combines their meta data tables into one new numpy record array.
Parameters
----------
meta_data_v2 : bool
True for new (v2) meta data format, False for the old (v1) format.
"""
if len(files_dict) > 10:
logging.info('Combine the meta data from %d files', len(files_dict)) # depends on [control=['if'], data=[]]
# determine total length needed for the new combined array, thats the fastest way to combine arrays
total_length = 0 # the total length of the new table
for file_name in files_dict.iterkeys():
with tb.open_file(file_name, mode='r') as in_file_h5: # open the actual file
total_length += in_file_h5.root.meta_data.shape[0] # depends on [control=['with'], data=['in_file_h5']] # depends on [control=['for'], data=['file_name']]
if meta_data_v2:
meta_data_combined = np.empty((total_length,), dtype=[('index_start', np.uint32), ('index_stop', np.uint32), ('data_length', np.uint32), ('timestamp_start', np.float64), ('timestamp_stop', np.float64), ('error', np.uint32)]) # depends on [control=['if'], data=[]]
else:
meta_data_combined = np.empty((total_length,), dtype=[('start_index', np.uint32), ('stop_index', np.uint32), ('length', np.uint32), ('timestamp', np.float64), ('error', np.uint32)])
if len(files_dict) > 10:
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=total_length, term_width=80)
progress_bar.start() # depends on [control=['if'], data=[]]
index = 0
# fill actual result array
for file_name in files_dict.iterkeys():
with tb.open_file(file_name, mode='r') as in_file_h5: # open the actual file
array_length = in_file_h5.root.meta_data.shape[0]
meta_data_combined[index:index + array_length] = in_file_h5.root.meta_data[:]
index += array_length
if len(files_dict) > 10:
progress_bar.update(index) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['in_file_h5']] # depends on [control=['for'], data=['file_name']]
if len(files_dict) > 10:
progress_bar.finish() # depends on [control=['if'], data=[]]
return meta_data_combined |
def get_logged_in_by(self, login, parent_zc, duration=0):
    """Get this client logged in via the preauth mechanism, using another,
    already logged-in admin client.

    The admin user's domain must define ``zimbraPreAuthKey``; the preauth
    key cannot be created by API, do it with zmprov:
    ``zmprov gdpak <domain>``

    :param login: account name to authenticate as
    :param parent_zc: logged-in admin client used to read the domain key
    :param duration: accepted for compatibility; currently unused here
    """
    account = zobjects.Account(name=login)
    preauth_key = parent_zc.get_domain(account.get_domain())['zimbraPreAuthKey']
    preauth_client = self.REST_PREAUTH(
        self._server_host, parent_zc._server_port, preauth_key=preauth_key)
    self.login_with_authToken(preauth_client.get_preauth_token(login))
self.login_with_authToken(authToken) | def function[get_logged_in_by, parameter[self, login, parent_zc, duration]]:
constant[Use another client to get logged in via preauth mechanism by an
already logged in admin.
It required the domain of the admin user to have preAuthKey
The preauth key cannot be created by API, do it with zmprov :
zmprov gdpak <domain>
]
variable[domain_name] assign[=] call[call[name[zobjects].Account, parameter[]].get_domain, parameter[]]
variable[preauth_key] assign[=] call[call[name[parent_zc].get_domain, parameter[name[domain_name]]]][constant[zimbraPreAuthKey]]
variable[rc] assign[=] call[name[self].REST_PREAUTH, parameter[name[self]._server_host, name[parent_zc]._server_port]]
variable[authToken] assign[=] call[name[rc].get_preauth_token, parameter[name[login]]]
call[name[self].login_with_authToken, parameter[name[authToken]]] | keyword[def] identifier[get_logged_in_by] ( identifier[self] , identifier[login] , identifier[parent_zc] , identifier[duration] = literal[int] ):
literal[string]
identifier[domain_name] = identifier[zobjects] . identifier[Account] ( identifier[name] = identifier[login] ). identifier[get_domain] ()
identifier[preauth_key] = identifier[parent_zc] . identifier[get_domain] ( identifier[domain_name] )[ literal[string] ]
identifier[rc] = identifier[self] . identifier[REST_PREAUTH] (
identifier[self] . identifier[_server_host] , identifier[parent_zc] . identifier[_server_port] , identifier[preauth_key] = identifier[preauth_key] )
identifier[authToken] = identifier[rc] . identifier[get_preauth_token] ( identifier[login] )
identifier[self] . identifier[login_with_authToken] ( identifier[authToken] ) | def get_logged_in_by(self, login, parent_zc, duration=0):
"""Use another client to get logged in via preauth mechanism by an
already logged in admin.
It required the domain of the admin user to have preAuthKey
The preauth key cannot be created by API, do it with zmprov :
zmprov gdpak <domain>
"""
domain_name = zobjects.Account(name=login).get_domain()
preauth_key = parent_zc.get_domain(domain_name)['zimbraPreAuthKey']
rc = self.REST_PREAUTH(self._server_host, parent_zc._server_port, preauth_key=preauth_key)
authToken = rc.get_preauth_token(login)
self.login_with_authToken(authToken) |
def _find_filepath_in_roots(filename):
    """Return ``(filepath, root)`` for the first media root containing *filename*.

    Scans ``settings.DJANGO_STATIC_MEDIA_ROOTS`` in order; when nothing
    matches and ``DEBUG`` is on, falls back to Django's staticfiles finders
    so app-bundled files are found too. Returns ``(None, None)`` when the
    file cannot be located anywhere.
    """
    for media_root in settings.DJANGO_STATIC_MEDIA_ROOTS:
        candidate = _filename2filepath(filename, media_root)
        if os.path.isfile(candidate):
            return candidate, media_root
    if not settings.DEBUG:
        return None, None
    # DEBUG only: let the staticfiles machinery search the installed apps.
    try:
        from django.contrib.staticfiles import finders
    except ImportError:
        return None, None
    absolute_path = finders.find(filename)
    if absolute_path:
        found_root, _basename = os.path.split(absolute_path)
        return absolute_path, found_root
    return None, None
constant[Look for filename in all MEDIA_ROOTS, and return the first one found.]
for taget[name[root]] in starred[name[settings].DJANGO_STATIC_MEDIA_ROOTS] begin[:]
variable[filepath] assign[=] call[name[_filename2filepath], parameter[name[filename], name[root]]]
if call[name[os].path.isfile, parameter[name[filepath]]] begin[:]
return[tuple[[<ast.Name object at 0x7da18bcc8cd0>, <ast.Name object at 0x7da18bccbb80>]]]
if name[settings].DEBUG begin[:]
<ast.Try object at 0x7da18bccb310>
return[tuple[[<ast.Constant object at 0x7da18bcca230>, <ast.Constant object at 0x7da18bcca7a0>]]] | keyword[def] identifier[_find_filepath_in_roots] ( identifier[filename] ):
literal[string]
keyword[for] identifier[root] keyword[in] identifier[settings] . identifier[DJANGO_STATIC_MEDIA_ROOTS] :
identifier[filepath] = identifier[_filename2filepath] ( identifier[filename] , identifier[root] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filepath] ):
keyword[return] identifier[filepath] , identifier[root]
keyword[if] identifier[settings] . identifier[DEBUG] :
keyword[try] :
keyword[from] identifier[django] . identifier[contrib] . identifier[staticfiles] keyword[import] identifier[finders]
identifier[absolute_path] = identifier[finders] . identifier[find] ( identifier[filename] )
keyword[if] identifier[absolute_path] :
identifier[root] , identifier[filepath] = identifier[os] . identifier[path] . identifier[split] ( identifier[absolute_path] )
keyword[return] identifier[absolute_path] , identifier[root]
keyword[except] identifier[ImportError] :
keyword[pass]
keyword[return] keyword[None] , keyword[None] | def _find_filepath_in_roots(filename):
"""Look for filename in all MEDIA_ROOTS, and return the first one found."""
for root in settings.DJANGO_STATIC_MEDIA_ROOTS:
filepath = _filename2filepath(filename, root)
if os.path.isfile(filepath):
return (filepath, root) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['root']]
# havent found it in DJANGO_STATIC_MEDIA_ROOTS look for apps' files if we're
# in DEBUG mode
if settings.DEBUG:
try:
from django.contrib.staticfiles import finders
absolute_path = finders.find(filename)
if absolute_path:
(root, filepath) = os.path.split(absolute_path)
return (absolute_path, root) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ImportError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return (None, None) |
def dispatch_monitor_traffic(self, msg):
    """all ME and Task queue messages come through here, as well as
    IOPub traffic.

    The first frame selects a handler from ``self.monitor_handlers``;
    malformed or unrecognized messages are logged as errors and dropped.
    """
    self.log.debug("monitor traffic: %r", msg[0])
    topic = msg[0]
    try:
        idents, msg = self.session.feed_identities(msg[1:])
    except ValueError:
        idents = []
    if not idents:
        self.log.error("Monitor message without topic: %r", msg)
        return
    handler = self.monitor_handlers.get(topic)
    if handler is None:
        self.log.error("Unrecognized monitor topic: %r", topic)
        return
    handler(idents, msg)
constant[all ME and Task queue messages come through here, as well as
IOPub traffic.]
call[name[self].log.debug, parameter[constant[monitor traffic: %r], call[name[msg]][constant[0]]]]
variable[switch] assign[=] call[name[msg]][constant[0]]
<ast.Try object at 0x7da20c6a8f40>
if <ast.UnaryOp object at 0x7da1b002c5e0> begin[:]
call[name[self].log.error, parameter[constant[Monitor message without topic: %r], name[msg]]]
return[None]
variable[handler] assign[=] call[name[self].monitor_handlers.get, parameter[name[switch], constant[None]]]
if compare[name[handler] is_not constant[None]] begin[:]
call[name[handler], parameter[name[idents], name[msg]]] | keyword[def] identifier[dispatch_monitor_traffic] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[msg] [ literal[int] ])
identifier[switch] = identifier[msg] [ literal[int] ]
keyword[try] :
identifier[idents] , identifier[msg] = identifier[self] . identifier[session] . identifier[feed_identities] ( identifier[msg] [ literal[int] :])
keyword[except] identifier[ValueError] :
identifier[idents] =[]
keyword[if] keyword[not] identifier[idents] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] , identifier[msg] )
keyword[return]
identifier[handler] = identifier[self] . identifier[monitor_handlers] . identifier[get] ( identifier[switch] , keyword[None] )
keyword[if] identifier[handler] keyword[is] keyword[not] keyword[None] :
identifier[handler] ( identifier[idents] , identifier[msg] )
keyword[else] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] , identifier[switch] ) | def dispatch_monitor_traffic(self, msg):
"""all ME and Task queue messages come through here, as well as
IOPub traffic."""
self.log.debug('monitor traffic: %r', msg[0])
switch = msg[0]
try:
(idents, msg) = self.session.feed_identities(msg[1:]) # depends on [control=['try'], data=[]]
except ValueError:
idents = [] # depends on [control=['except'], data=[]]
if not idents:
self.log.error('Monitor message without topic: %r', msg)
return # depends on [control=['if'], data=[]]
handler = self.monitor_handlers.get(switch, None)
if handler is not None:
handler(idents, msg) # depends on [control=['if'], data=['handler']]
else:
self.log.error('Unrecognized monitor topic: %r', switch) |
def shutdown(self, *args):
    """
    Stop the monitored process (if any) and tear down the monitor.

    Waits for the child process, closes its standard streams, marks the
    monitor as finished, and notifies the test-case server on the
    configured port.  Any failure is re-raised as ``PJFBaseException``.
    """
    try:
        self._shutdown()
        proc = self.process
        if proc:
            proc.wait()
            for stream in (proc.stdout, proc.stdin, proc.stderr):
                stream.close()
        self.finished = True
        tcase_port = self.config.ports["servers"]["TCASE_PORT"]
        self.send_testcase('', '127.0.0.1', tcase_port)
        timestamp = time.strftime("%H:%M:%S")
        self.logger.debug("[{0}] - PJFProcessMonitor successfully completed".format(timestamp))
    except Exception as e:
        # Python 2 exceptions may carry ``.message``; fall back to str(e).
        raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
constant[
Shutdown the running process and the monitor
]
<ast.Try object at 0x7da1b0314dc0> | keyword[def] identifier[shutdown] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_shutdown] ()
keyword[if] identifier[self] . identifier[process] :
identifier[self] . identifier[process] . identifier[wait] ()
identifier[self] . identifier[process] . identifier[stdout] . identifier[close] ()
identifier[self] . identifier[process] . identifier[stdin] . identifier[close] ()
identifier[self] . identifier[process] . identifier[stderr] . identifier[close] ()
identifier[self] . identifier[finished] = keyword[True]
identifier[self] . identifier[send_testcase] ( literal[string] , literal[string] , identifier[self] . identifier[config] . identifier[ports] [ literal[string] ][ literal[string] ])
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[time] . identifier[strftime] ( literal[string] )))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[PJFBaseException] ( identifier[e] . identifier[message] keyword[if] identifier[hasattr] ( identifier[e] , literal[string] ) keyword[else] identifier[str] ( identifier[e] )) | def shutdown(self, *args):
"""
Shutdown the running process and the monitor
"""
try:
self._shutdown()
if self.process:
self.process.wait()
self.process.stdout.close()
self.process.stdin.close()
self.process.stderr.close() # depends on [control=['if'], data=[]]
self.finished = True
self.send_testcase('', '127.0.0.1', self.config.ports['servers']['TCASE_PORT'])
self.logger.debug('[{0}] - PJFProcessMonitor successfully completed'.format(time.strftime('%H:%M:%S'))) # depends on [control=['try'], data=[]]
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, 'message') else str(e)) # depends on [control=['except'], data=['e']] |
def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs):
    '''
    Create a snapshot from the given instance
    .. versionadded:: 2016.3.0
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    created = __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs)
    if not created:
        ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name)
        ret['result'] = False
        return ret
    ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name)
    ret['changes']['new'] = {ami_name: ami_name}
    if not wait_until_available:
        return ret
    # Poll every five seconds until the image reports 'available' or the
    # timeout elapses.
    deadline = time() + wait_timeout_seconds
    while True:
        images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs)
        if images and images[0].state == 'available':
            return ret
        if time() > deadline:
            if images:
                ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state)
            else:
                ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name)
            ret['result'] = False
            return ret
        sleep(5)
constant[
Create a snapshot from the given instance
.. versionadded:: 2016.3.0
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da207f00c10>, <ast.Constant object at 0x7da207f01ff0>, <ast.Constant object at 0x7da207f02200>, <ast.Constant object at 0x7da207f01cc0>], [<ast.Name object at 0x7da207f02470>, <ast.Constant object at 0x7da207f00760>, <ast.Constant object at 0x7da207f030a0>, <ast.Dict object at 0x7da207f02530>]]
if <ast.UnaryOp object at 0x7da207f02dd0> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to create new AMI {ami_name}].format, parameter[]]
call[name[ret]][constant[result]] assign[=] constant[False]
return[name[ret]]
call[name[ret]][constant[comment]] assign[=] call[constant[Created new AMI {ami_name}].format, parameter[]]
call[call[name[ret]][constant[changes]]][constant[new]] assign[=] dictionary[[<ast.Name object at 0x7da1b21e1090>], [<ast.Name object at 0x7da1b21e11e0>]]
if <ast.UnaryOp object at 0x7da1b21e0df0> begin[:]
return[name[ret]]
variable[starttime] assign[=] call[name[time], parameter[]]
while constant[True] begin[:]
variable[images] assign[=] call[call[name[__salt__]][constant[boto_ec2.find_images]], parameter[]]
if <ast.BoolOp object at 0x7da1b21e3b50> begin[:]
break
if compare[binary_operation[call[name[time], parameter[]] - name[starttime]] greater[>] name[wait_timeout_seconds]] begin[:]
if name[images] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[AMI still in state {state} after timeout].format, parameter[]]
call[name[ret]][constant[result]] assign[=] constant[False]
return[name[ret]]
call[name[sleep], parameter[constant[5]]]
return[name[ret]] | keyword[def] identifier[snapshot_created] ( identifier[name] , identifier[ami_name] , identifier[instance_name] , identifier[wait_until_available] = keyword[True] , identifier[wait_timeout_seconds] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] : keyword[True] ,
literal[string] : literal[string] ,
literal[string] :{}
}
keyword[if] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[ami_name] = identifier[ami_name] , identifier[instance_name] = identifier[instance_name] ,** identifier[kwargs] ):
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[ami_name] = identifier[ami_name] )
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[ami_name] = identifier[ami_name] )
identifier[ret] [ literal[string] ][ literal[string] ]={ identifier[ami_name] : identifier[ami_name] }
keyword[if] keyword[not] identifier[wait_until_available] :
keyword[return] identifier[ret]
identifier[starttime] = identifier[time] ()
keyword[while] keyword[True] :
identifier[images] = identifier[__salt__] [ literal[string] ]( identifier[ami_name] = identifier[ami_name] , identifier[return_objs] = keyword[True] ,** identifier[kwargs] )
keyword[if] identifier[images] keyword[and] identifier[images] [ literal[int] ]. identifier[state] == literal[string] :
keyword[break]
keyword[if] identifier[time] ()- identifier[starttime] > identifier[wait_timeout_seconds] :
keyword[if] identifier[images] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[state] = identifier[images] [ literal[int] ]. identifier[state] )
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[ami_name] = identifier[ami_name] )
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
identifier[sleep] ( literal[int] )
keyword[return] identifier[ret] | def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs):
"""
Create a snapshot from the given instance
.. versionadded:: 2016.3.0
"""
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs):
ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name)
ret['result'] = False
return ret # depends on [control=['if'], data=[]]
ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name)
ret['changes']['new'] = {ami_name: ami_name}
if not wait_until_available:
return ret # depends on [control=['if'], data=[]]
starttime = time()
while True:
images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs)
if images and images[0].state == 'available':
break # depends on [control=['if'], data=[]]
if time() - starttime > wait_timeout_seconds:
if images:
ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name)
ret['result'] = False
return ret # depends on [control=['if'], data=[]]
sleep(5) # depends on [control=['while'], data=[]]
return ret |
def isPythonFile(filename):
    """Return True if filename points to a Python file."""
    if filename.endswith('.py'):
        return True
    # Skip obvious Emacs-style backup files such as "foo.py~".
    if filename.endswith("~"):
        return False
    # Otherwise sniff the first bytes of the file for a Python shebang.
    try:
        with open(filename, 'rb') as handle:
            head = handle.read(128)
    except IOError:
        return False
    if not head:
        return False
    return PYTHON_SHEBANG_REGEX.match(head.splitlines()[0])
constant[Return True if filename points to a Python file.]
if call[name[filename].endswith, parameter[constant[.py]]] begin[:]
return[constant[True]]
if call[name[filename].endswith, parameter[constant[~]]] begin[:]
return[constant[False]]
variable[max_bytes] assign[=] constant[128]
<ast.Try object at 0x7da1b18d9f60>
variable[first_line] assign[=] call[call[name[text].splitlines, parameter[]]][constant[0]]
return[call[name[PYTHON_SHEBANG_REGEX].match, parameter[name[first_line]]]] | keyword[def] identifier[isPythonFile] ( identifier[filename] ):
literal[string]
keyword[if] identifier[filename] . identifier[endswith] ( literal[string] ):
keyword[return] keyword[True]
keyword[if] identifier[filename] . identifier[endswith] ( literal[string] ):
keyword[return] keyword[False]
identifier[max_bytes] = literal[int]
keyword[try] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[text] = identifier[f] . identifier[read] ( identifier[max_bytes] )
keyword[if] keyword[not] identifier[text] :
keyword[return] keyword[False]
keyword[except] identifier[IOError] :
keyword[return] keyword[False]
identifier[first_line] = identifier[text] . identifier[splitlines] ()[ literal[int] ]
keyword[return] identifier[PYTHON_SHEBANG_REGEX] . identifier[match] ( identifier[first_line] ) | def isPythonFile(filename):
"""Return True if filename points to a Python file."""
if filename.endswith('.py'):
return True # depends on [control=['if'], data=[]]
# Avoid obvious Emacs backup files
if filename.endswith('~'):
return False # depends on [control=['if'], data=[]]
max_bytes = 128
try:
with open(filename, 'rb') as f:
text = f.read(max_bytes)
if not text:
return False # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError:
return False # depends on [control=['except'], data=[]]
first_line = text.splitlines()[0]
return PYTHON_SHEBANG_REGEX.match(first_line) |
def FindAll(params, ctxt, scope, stream, coord, interp):
    """
    Search the current file for every occurrence of the target data and
    return a TFindResults structure describing the matches.

    The argument data is converted into hex bytes and searched for; data
    may be any of the basic types or an array of one of the types (an
    array of signed bytes is treated as a null-terminated string; to
    search for raw hex bytes use an unsigned char array).  For strings
    the matchcase and wholeword arguments control the search, and method
    selects the search mode:
        FINDMETHOD_NORMAL=0    - a normal search
        FINDMETHOD_WILDCARDS=1 - string search with wildcards '*' or '?'
        FINDMETHOD_REGEX=2     - string search with Regular Expressions
    wildcardMatchLength caps how many characters a '*' may match,
    tolerance permits approximate float/double matches, dir picks the
    search direction (1 = down, 0 = up), and start/size limit the region
    searched (a size of zero means "from start to the end of the file").

    The returned TFindResults carries a ``count`` of matches plus
    parallel ``start`` and ``size`` arrays holding each match's starting
    position and length.
    """
    matches = list(_find_helper(params, ctxt, scope, stream, coord, interp))
    results = interp.get_types().TFindResults()
    results.count = len(matches)
    results.start = [m.start() + FIND_MATCHES_START_OFFSET for m in matches]
    results.size = [m.end() - m.start() for m in matches]
    return results
constant[
This function converts the argument data into a set of hex bytes
and then searches the current file for all occurrences of those
bytes. data may be any of the basic types or an array of one of
the types. If data is an array of signed bytes, it is assumed to
be a null-terminated string. To search for an array of hex bytes,
create an unsigned char array and fill it with the target value. If
the type being search for is a string, the matchcase and wholeworld
arguments can be used to control the search (see Using Find for more
information). method controls which search method is used from the
following options:
FINDMETHOD_NORMAL=0 - a normal search
FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?'
FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions
wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file.
The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file:
]
variable[matches_iter] assign[=] call[name[_find_helper], parameter[name[params], name[ctxt], name[scope], name[stream], name[coord], name[interp]]]
variable[matches] assign[=] call[name[list], parameter[name[matches_iter]]]
variable[types] assign[=] call[name[interp].get_types, parameter[]]
variable[res] assign[=] call[name[types].TFindResults, parameter[]]
name[res].count assign[=] call[name[len], parameter[name[matches]]]
variable[starts] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b10d6560>, name[matches]]]]]
name[res].start assign[=] name[starts]
variable[sizes] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b10d4730>, name[matches]]]]]
name[res].size assign[=] name[sizes]
return[name[res]] | keyword[def] identifier[FindAll] ( identifier[params] , identifier[ctxt] , identifier[scope] , identifier[stream] , identifier[coord] , identifier[interp] ):
literal[string]
identifier[matches_iter] = identifier[_find_helper] ( identifier[params] , identifier[ctxt] , identifier[scope] , identifier[stream] , identifier[coord] , identifier[interp] )
identifier[matches] = identifier[list] ( identifier[matches_iter] )
identifier[types] = identifier[interp] . identifier[get_types] ()
identifier[res] = identifier[types] . identifier[TFindResults] ()
identifier[res] . identifier[count] = identifier[len] ( identifier[matches] )
identifier[starts] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[m] : identifier[m] . identifier[start] ()+ identifier[FIND_MATCHES_START_OFFSET] , identifier[matches] ))
identifier[res] . identifier[start] = identifier[starts]
identifier[sizes] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[m] : identifier[m] . identifier[end] ()- identifier[m] . identifier[start] (), identifier[matches] ))
identifier[res] . identifier[size] = identifier[sizes]
keyword[return] identifier[res] | def FindAll(params, ctxt, scope, stream, coord, interp):
"""
This function converts the argument data into a set of hex bytes
and then searches the current file for all occurrences of those
bytes. data may be any of the basic types or an array of one of
the types. If data is an array of signed bytes, it is assumed to
be a null-terminated string. To search for an array of hex bytes,
create an unsigned char array and fill it with the target value. If
the type being search for is a string, the matchcase and wholeworld
arguments can be used to control the search (see Using Find for more
information). method controls which search method is used from the
following options:
FINDMETHOD_NORMAL=0 - a normal search
FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?'
FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions
wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file.
The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file:
"""
matches_iter = _find_helper(params, ctxt, scope, stream, coord, interp)
matches = list(matches_iter)
types = interp.get_types()
res = types.TFindResults()
res.count = len(matches)
# python3 map doesn't return a list
starts = list(map(lambda m: m.start() + FIND_MATCHES_START_OFFSET, matches))
res.start = starts
# python3 map doesn't return a list
sizes = list(map(lambda m: m.end() - m.start(), matches))
res.size = sizes
return res |
def first_line(path):
    """
    Return the stripped first line of the file at `path`.

    :param str|None path: Path to file
    :return str|None: First line of file, or None when the file cannot
        be opened (missing, unreadable, or `path` is not a valid
        path-like argument)
    """
    try:
        handle = io.open(path, "rt", errors="ignore")
    except (IOError, TypeError):
        return None
    with handle:
        return handle.readline().strip()
constant[
:param str|None path: Path to file
:return str|None: First line of file, if any
]
<ast.Try object at 0x7da1b23500d0> | keyword[def] identifier[first_line] ( identifier[path] ):
literal[string]
keyword[try] :
keyword[with] identifier[io] . identifier[open] ( identifier[path] , literal[string] , identifier[errors] = literal[string] ) keyword[as] identifier[fh] :
keyword[return] identifier[fh] . identifier[readline] (). identifier[strip] ()
keyword[except] ( identifier[IOError] , identifier[TypeError] ):
keyword[return] keyword[None] | def first_line(path):
"""
:param str|None path: Path to file
:return str|None: First line of file, if any
"""
try:
with io.open(path, 'rt', errors='ignore') as fh:
return fh.readline().strip() # depends on [control=['with'], data=['fh']] # depends on [control=['try'], data=[]]
except (IOError, TypeError):
return None # depends on [control=['except'], data=[]] |
def get_qpimage_raw(self, idx):
    """Return the QPImage for dataset `idx` without background correction,
    tagged with this series' identifier."""
    dataset = self._get_dataset(idx)
    raw = dataset.get_qpimage_raw()
    raw["identifier"] = self.get_identifier(idx)
    return raw
constant[Return QPImage without background correction]
variable[ds] assign[=] call[name[self]._get_dataset, parameter[name[idx]]]
variable[qpi] assign[=] call[name[ds].get_qpimage_raw, parameter[]]
call[name[qpi]][constant[identifier]] assign[=] call[name[self].get_identifier, parameter[name[idx]]]
return[name[qpi]] | keyword[def] identifier[get_qpimage_raw] ( identifier[self] , identifier[idx] ):
literal[string]
identifier[ds] = identifier[self] . identifier[_get_dataset] ( identifier[idx] )
identifier[qpi] = identifier[ds] . identifier[get_qpimage_raw] ()
identifier[qpi] [ literal[string] ]= identifier[self] . identifier[get_identifier] ( identifier[idx] )
keyword[return] identifier[qpi] | def get_qpimage_raw(self, idx):
"""Return QPImage without background correction"""
ds = self._get_dataset(idx)
qpi = ds.get_qpimage_raw()
qpi['identifier'] = self.get_identifier(idx)
return qpi |
def refresh_db(root=None, **kwargs):
    '''
    Just run a ``pacman -Sy``, return a dict::
        {'<database name>': Bool}
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.refresh_db
    '''
    # Remove rtag file to keep multiple refreshes from happening in pkg states
    salt.utils.pkg.clear_rtag(__opts__)
    cmd = ['pacman', '-Sy']
    if root is not None:
        # Operate on an alternate installation root.
        cmd.extend(('-r', root))
    call = __salt__['cmd.run_all'](cmd,
                                   output_loglevel='trace',
                                   env={'LANG': 'C'},
                                   python_shell=False)
    if call['retcode'] != 0:
        comment = ''
        if 'stderr' in call:
            comment += ': ' + call['stderr']
        raise CommandExecutionError(
            'Error refreshing package database' + comment
        )
    ret = {}
    for line in salt.utils.itertools.split(call['stdout'], '\n'):
        stripped = line.strip()
        # Skip blank/whitespace-only lines and pacman section headers
        # ('::').  The previous code indexed split()[0] unconditionally,
        # which raised IndexError on whitespace-only lines.
        if not stripped or stripped.startswith('::'):
            continue
        if 'is up to date' in line:
            # e.g. " core is up to date" -> database name is first token
            ret[stripped.split()[0]] = False
        elif 'downloading' in line:
            # e.g. " downloading extra.db..." -> name is before first '.'
            ret[stripped.split()[1].split('.')[0]] = True
    return ret
constant[
Just run a ``pacman -Sy``, return a dict::
{'<database name>': Bool}
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
]
call[name[salt].utils.pkg.clear_rtag, parameter[name[__opts__]]]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b2135810>, <ast.Constant object at 0x7da1b2136c50>]]
if compare[name[root] is_not constant[None]] begin[:]
call[name[cmd].extend, parameter[tuple[[<ast.Constant object at 0x7da1b2134820>, <ast.Name object at 0x7da1b2134c70>]]]]
variable[ret] assign[=] dictionary[[], []]
variable[call] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[call]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
variable[comment] assign[=] constant[]
if compare[constant[stderr] in name[call]] begin[:]
<ast.AugAssign object at 0x7da1b21365c0>
<ast.Raise object at 0x7da1b2136470>
for taget[name[line]] in starred[call[name[salt].utils.itertools.split, parameter[name[out], constant[
]]]] begin[:]
if call[call[name[line].strip, parameter[]].startswith, parameter[constant[::]]] begin[:]
continue
if <ast.UnaryOp object at 0x7da1b2134340> begin[:]
continue
variable[key] assign[=] call[call[call[name[line].strip, parameter[]].split, parameter[]]][constant[0]]
if compare[constant[is up to date] in name[line]] begin[:]
call[name[ret]][name[key]] assign[=] constant[False]
return[name[ret]] | keyword[def] identifier[refresh_db] ( identifier[root] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[salt] . identifier[utils] . identifier[pkg] . identifier[clear_rtag] ( identifier[__opts__] )
identifier[cmd] =[ literal[string] , literal[string] ]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
identifier[cmd] . identifier[extend] (( literal[string] , identifier[root] ))
identifier[ret] ={}
identifier[call] = identifier[__salt__] [ literal[string] ]( identifier[cmd] ,
identifier[output_loglevel] = literal[string] ,
identifier[env] ={ literal[string] : literal[string] },
identifier[python_shell] = keyword[False] )
keyword[if] identifier[call] [ literal[string] ]!= literal[int] :
identifier[comment] = literal[string]
keyword[if] literal[string] keyword[in] identifier[call] :
identifier[comment] += literal[string] + identifier[call] [ literal[string] ]
keyword[raise] identifier[CommandExecutionError] (
literal[string] + identifier[comment]
)
keyword[else] :
identifier[out] = identifier[call] [ literal[string] ]
keyword[for] identifier[line] keyword[in] identifier[salt] . identifier[utils] . identifier[itertools] . identifier[split] ( identifier[out] , literal[string] ):
keyword[if] identifier[line] . identifier[strip] (). identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] keyword[not] identifier[line] :
keyword[continue]
identifier[key] = identifier[line] . identifier[strip] (). identifier[split] ()[ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[ret] [ identifier[key] ]= keyword[False]
keyword[elif] literal[string] keyword[in] identifier[line] :
identifier[key] = identifier[line] . identifier[strip] (). identifier[split] ()[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[ret] [ identifier[key] ]= keyword[True]
keyword[return] identifier[ret] | def refresh_db(root=None, **kwargs):
"""
Just run a ``pacman -Sy``, return a dict::
{'<database name>': Bool}
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
"""
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
cmd = ['pacman', '-Sy']
if root is not None:
cmd.extend(('-r', root)) # depends on [control=['if'], data=['root']]
ret = {}
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', env={'LANG': 'C'}, python_shell=False)
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += ': ' + call['stderr'] # depends on [control=['if'], data=['call']]
raise CommandExecutionError('Error refreshing package database' + comment) # depends on [control=['if'], data=[]]
else:
out = call['stdout']
for line in salt.utils.itertools.split(out, '\n'):
if line.strip().startswith('::'):
continue # depends on [control=['if'], data=[]]
if not line:
continue # depends on [control=['if'], data=[]]
key = line.strip().split()[0]
if 'is up to date' in line:
ret[key] = False # depends on [control=['if'], data=[]]
elif 'downloading' in line:
key = line.strip().split()[1].split('.')[0]
ret[key] = True # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']]
return ret |
def init_config(self):
    """Patch input.nml as a new or restart run."""
    nml_path = os.path.join(self.work_path, 'input.nml')
    nml = f90nml.read(nml_path)
    # 'n' selects a fresh (new) run; 'r' restarts from previous state.
    run_type = 'n' if (self.expt.counter == 0 or self.expt.repeat_run) else 'r'
    nml['MOM_input_nml']['input_filename'] = run_type
    f90nml.write(nml, nml_path, force=True)
constant[Patch input.nml as a new or restart run.]
variable[input_fpath] assign[=] call[name[os].path.join, parameter[name[self].work_path, constant[input.nml]]]
variable[input_nml] assign[=] call[name[f90nml].read, parameter[name[input_fpath]]]
if <ast.BoolOp object at 0x7da1b04fc970> begin[:]
variable[input_type] assign[=] constant[n]
call[call[name[input_nml]][constant[MOM_input_nml]]][constant[input_filename]] assign[=] name[input_type]
call[name[f90nml].write, parameter[name[input_nml], name[input_fpath]]] | keyword[def] identifier[init_config] ( identifier[self] ):
literal[string]
identifier[input_fpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[work_path] , literal[string] )
identifier[input_nml] = identifier[f90nml] . identifier[read] ( identifier[input_fpath] )
keyword[if] identifier[self] . identifier[expt] . identifier[counter] == literal[int] keyword[or] identifier[self] . identifier[expt] . identifier[repeat_run] :
identifier[input_type] = literal[string]
keyword[else] :
identifier[input_type] = literal[string]
identifier[input_nml] [ literal[string] ][ literal[string] ]= identifier[input_type]
identifier[f90nml] . identifier[write] ( identifier[input_nml] , identifier[input_fpath] , identifier[force] = keyword[True] ) | def init_config(self):
"""Patch input.nml as a new or restart run."""
input_fpath = os.path.join(self.work_path, 'input.nml')
input_nml = f90nml.read(input_fpath)
if self.expt.counter == 0 or self.expt.repeat_run:
input_type = 'n' # depends on [control=['if'], data=[]]
else:
input_type = 'r'
input_nml['MOM_input_nml']['input_filename'] = input_type
f90nml.write(input_nml, input_fpath, force=True) |
def peek(self) -> str:
    """Look at the next character without consuming it.

    Returns:
        The character at ``self.offset``; the offset is not advanced.

    Raises:
        EndOfInput: If past the end of `self.input`.
    """
    try:
        char = self.input[self.offset]
    except IndexError:
        raise EndOfInput(self)
    return char
constant[Return the next character without advancing offset.
Raises:
EndOfInput: If past the end of `self.input`.
]
<ast.Try object at 0x7da1b052b4f0> | keyword[def] identifier[peek] ( identifier[self] )-> identifier[str] :
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[input] [ identifier[self] . identifier[offset] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[EndOfInput] ( identifier[self] ) | def peek(self) -> str:
"""Return the next character without advancing offset.
Raises:
EndOfInput: If past the end of `self.input`.
"""
try:
return self.input[self.offset] # depends on [control=['try'], data=[]]
except IndexError:
raise EndOfInput(self) # depends on [control=['except'], data=[]] |
def setRecord(self, record):
    """
    Sets the record instance linked with this widget and reloads the
    widget's values from it (an empty mapping when record is None).
    :param record | <orb.Table>
    """
    self._record = record
    values = {} if record is None else record.recordValues(autoInflate=True)
    self.loadValues(values)
constant[
Sets the record instance linked with this widget.
:param record | <orb.Table>
]
name[self]._record assign[=] name[record]
if compare[name[record] is_not constant[None]] begin[:]
call[name[self].loadValues, parameter[call[name[record].recordValues, parameter[]]]] | keyword[def] identifier[setRecord] ( identifier[self] , identifier[record] ):
literal[string]
identifier[self] . identifier[_record] = identifier[record]
keyword[if] identifier[record] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[loadValues] ( identifier[record] . identifier[recordValues] ( identifier[autoInflate] = keyword[True] ))
keyword[else] :
identifier[self] . identifier[loadValues] ({}) | def setRecord(self, record):
"""
Sets the record instance linked with this widget.
:param record | <orb.Table>
"""
self._record = record
if record is not None:
self.loadValues(record.recordValues(autoInflate=True)) # depends on [control=['if'], data=['record']]
else:
self.loadValues({}) |
def list(self, path):
"""GET /<path>?list=true
:param path:
:type path:
:return:
:rtype:
"""
try:
payload = {
'list': True
}
return self._adapter.get('/v1/{0}'.format(path), params=payload).json()
except exceptions.InvalidPath:
return None | def function[list, parameter[self, path]]:
constant[GET /<path>?list=true
:param path:
:type path:
:return:
:rtype:
]
<ast.Try object at 0x7da1b235ab30> | keyword[def] identifier[list] ( identifier[self] , identifier[path] ):
literal[string]
keyword[try] :
identifier[payload] ={
literal[string] : keyword[True]
}
keyword[return] identifier[self] . identifier[_adapter] . identifier[get] ( literal[string] . identifier[format] ( identifier[path] ), identifier[params] = identifier[payload] ). identifier[json] ()
keyword[except] identifier[exceptions] . identifier[InvalidPath] :
keyword[return] keyword[None] | def list(self, path):
"""GET /<path>?list=true
:param path:
:type path:
:return:
:rtype:
"""
try:
payload = {'list': True}
return self._adapter.get('/v1/{0}'.format(path), params=payload).json() # depends on [control=['try'], data=[]]
except exceptions.InvalidPath:
return None # depends on [control=['except'], data=[]] |
def parse_prefix(prefix, default_length=128):
"""
Splits the given IP prefix into a network address and a prefix length.
If the prefix does not have a length (i.e., it is a simple IP address),
it is presumed to have the given default length.
:type prefix: string
:param prefix: An IP mask.
:type default_length: long
:param default_length: The default ip prefix length.
:rtype: string, int
:return: A tuple containing the IP address and prefix length.
"""
if '/' in prefix:
network, pfxlen = prefix.split('/')
else:
network = prefix
pfxlen = default_length
return network, int(pfxlen) | def function[parse_prefix, parameter[prefix, default_length]]:
constant[
Splits the given IP prefix into a network address and a prefix length.
If the prefix does not have a length (i.e., it is a simple IP address),
it is presumed to have the given default length.
:type prefix: string
:param prefix: An IP mask.
:type default_length: long
:param default_length: The default ip prefix length.
:rtype: string, int
:return: A tuple containing the IP address and prefix length.
]
if compare[constant[/] in name[prefix]] begin[:]
<ast.Tuple object at 0x7da1b06530d0> assign[=] call[name[prefix].split, parameter[constant[/]]]
return[tuple[[<ast.Name object at 0x7da1b0652e60>, <ast.Call object at 0x7da1b0652830>]]] | keyword[def] identifier[parse_prefix] ( identifier[prefix] , identifier[default_length] = literal[int] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[prefix] :
identifier[network] , identifier[pfxlen] = identifier[prefix] . identifier[split] ( literal[string] )
keyword[else] :
identifier[network] = identifier[prefix]
identifier[pfxlen] = identifier[default_length]
keyword[return] identifier[network] , identifier[int] ( identifier[pfxlen] ) | def parse_prefix(prefix, default_length=128):
"""
Splits the given IP prefix into a network address and a prefix length.
If the prefix does not have a length (i.e., it is a simple IP address),
it is presumed to have the given default length.
:type prefix: string
:param prefix: An IP mask.
:type default_length: long
:param default_length: The default ip prefix length.
:rtype: string, int
:return: A tuple containing the IP address and prefix length.
"""
if '/' in prefix:
(network, pfxlen) = prefix.split('/') # depends on [control=['if'], data=['prefix']]
else:
network = prefix
pfxlen = default_length
return (network, int(pfxlen)) |
def entity(self, entity_type, identifier=None):
"""Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(ie, ``user``, ``group``, etc)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity.
"""
entity = _ACLEntity(entity_type=entity_type, identifier=identifier)
if self.has_entity(entity):
entity = self.get_entity(entity)
else:
self.add_entity(entity)
return entity | def function[entity, parameter[self, entity_type, identifier]]:
constant[Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(ie, ``user``, ``group``, etc)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity.
]
variable[entity] assign[=] call[name[_ACLEntity], parameter[]]
if call[name[self].has_entity, parameter[name[entity]]] begin[:]
variable[entity] assign[=] call[name[self].get_entity, parameter[name[entity]]]
return[name[entity]] | keyword[def] identifier[entity] ( identifier[self] , identifier[entity_type] , identifier[identifier] = keyword[None] ):
literal[string]
identifier[entity] = identifier[_ACLEntity] ( identifier[entity_type] = identifier[entity_type] , identifier[identifier] = identifier[identifier] )
keyword[if] identifier[self] . identifier[has_entity] ( identifier[entity] ):
identifier[entity] = identifier[self] . identifier[get_entity] ( identifier[entity] )
keyword[else] :
identifier[self] . identifier[add_entity] ( identifier[entity] )
keyword[return] identifier[entity] | def entity(self, entity_type, identifier=None):
"""Factory method for creating an Entity.
If an entity with the same type and identifier already exists,
this will return a reference to that entity. If not, it will
create a new one and add it to the list of known entities for
this ACL.
:type entity_type: str
:param entity_type: The type of entity to create
(ie, ``user``, ``group``, etc)
:type identifier: str
:param identifier: The ID of the entity (if applicable).
This can be either an ID or an e-mail address.
:rtype: :class:`_ACLEntity`
:returns: A new Entity or a reference to an existing identical entity.
"""
entity = _ACLEntity(entity_type=entity_type, identifier=identifier)
if self.has_entity(entity):
entity = self.get_entity(entity) # depends on [control=['if'], data=[]]
else:
self.add_entity(entity)
return entity |
def mscoco_generator(data_dir,
tmp_dir,
training,
how_many,
start_from=0,
eos_list=None,
vocab_filename=None):
"""Image generator for MSCOCO captioning problem with token-wise captions.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
eos_list: optional list of end of sentence tokens, otherwise use default
value `1`.
vocab_filename: file within `tmp_dir` to read vocabulary from.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as JPEG,
* image/format: the string "jpeg" representing image format,
* image/class/label: a list of integers representing the caption,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a list of the corresponding type.
"""
eos_list = [1] if eos_list is None else eos_list
def get_vocab():
"""Get vocab for caption text encoder."""
if data_dir is not None and vocab_filename is not None:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)
return vocab_symbolizer
else:
raise ValueError("Vocab file does not exist: %s" % vocab_filepath)
return None
vocab_symbolizer = get_vocab()
_get_mscoco(tmp_dir)
caption_filepath = (
_MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE)
caption_filepath = os.path.join(tmp_dir, caption_filepath)
prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX
caption_file = io.open(caption_filepath)
caption_json = json.load(caption_file)
# Dictionary from image_id to ((filename, height, width), captions).
image_dict = {}
for image in caption_json["images"]:
image_dict[image["id"]] = [(image["file_name"], image["height"],
image["width"]), []]
annotations = caption_json["annotations"]
annotation_count = len(annotations)
image_count = len(image_dict)
tf.logging.info("Processing %d images and %d labels\n" % (image_count,
annotation_count))
for annotation in annotations:
image_id = annotation["image_id"]
image_dict[image_id][1].append(annotation["caption"])
data = list(image_dict.values())[start_from:start_from + how_many]
random.shuffle(data)
for image_info, labels in data:
image_filename = image_info[0]
image_filepath = os.path.join(tmp_dir, prefix, image_filename)
with tf.gfile.Open(image_filepath, "rb") as f:
encoded_image_data = f.read()
height, width = image_info[1], image_info[2]
for label in labels:
if vocab_filename is None or vocab_symbolizer is None:
label = [ord(c) for c in label] + eos_list
else:
label = vocab_symbolizer.encode(label) + eos_list
yield {
"image/encoded": [encoded_image_data],
"image/format": ["jpeg"],
"image/class/label": label,
"image/height": [height],
"image/width": [width]
} | def function[mscoco_generator, parameter[data_dir, tmp_dir, training, how_many, start_from, eos_list, vocab_filename]]:
constant[Image generator for MSCOCO captioning problem with token-wise captions.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
eos_list: optional list of end of sentence tokens, otherwise use default
value `1`.
vocab_filename: file within `tmp_dir` to read vocabulary from.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as JPEG,
* image/format: the string "jpeg" representing image format,
* image/class/label: a list of integers representing the caption,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a list of the corresponding type.
]
variable[eos_list] assign[=] <ast.IfExp object at 0x7da2047e85b0>
def function[get_vocab, parameter[]]:
constant[Get vocab for caption text encoder.]
if <ast.BoolOp object at 0x7da2047eb370> begin[:]
variable[vocab_filepath] assign[=] call[name[os].path.join, parameter[name[data_dir], name[vocab_filename]]]
if call[name[tf].gfile.Exists, parameter[name[vocab_filepath]]] begin[:]
call[name[tf].logging.info, parameter[constant[Found vocab file: %s], name[vocab_filepath]]]
variable[vocab_symbolizer] assign[=] call[name[text_encoder].SubwordTextEncoder, parameter[name[vocab_filepath]]]
return[name[vocab_symbolizer]]
return[constant[None]]
variable[vocab_symbolizer] assign[=] call[name[get_vocab], parameter[]]
call[name[_get_mscoco], parameter[name[tmp_dir]]]
variable[caption_filepath] assign[=] <ast.IfExp object at 0x7da2047eb1c0>
variable[caption_filepath] assign[=] call[name[os].path.join, parameter[name[tmp_dir], name[caption_filepath]]]
variable[prefix] assign[=] <ast.IfExp object at 0x7da2047e8c10>
variable[caption_file] assign[=] call[name[io].open, parameter[name[caption_filepath]]]
variable[caption_json] assign[=] call[name[json].load, parameter[name[caption_file]]]
variable[image_dict] assign[=] dictionary[[], []]
for taget[name[image]] in starred[call[name[caption_json]][constant[images]]] begin[:]
call[name[image_dict]][call[name[image]][constant[id]]] assign[=] list[[<ast.Tuple object at 0x7da2047ea8c0>, <ast.List object at 0x7da2047ea710>]]
variable[annotations] assign[=] call[name[caption_json]][constant[annotations]]
variable[annotation_count] assign[=] call[name[len], parameter[name[annotations]]]
variable[image_count] assign[=] call[name[len], parameter[name[image_dict]]]
call[name[tf].logging.info, parameter[binary_operation[constant[Processing %d images and %d labels
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2047e8c40>, <ast.Name object at 0x7da2047e8730>]]]]]
for taget[name[annotation]] in starred[name[annotations]] begin[:]
variable[image_id] assign[=] call[name[annotation]][constant[image_id]]
call[call[call[name[image_dict]][name[image_id]]][constant[1]].append, parameter[call[name[annotation]][constant[caption]]]]
variable[data] assign[=] call[call[name[list], parameter[call[name[image_dict].values, parameter[]]]]][<ast.Slice object at 0x7da2047e9fc0>]
call[name[random].shuffle, parameter[name[data]]]
for taget[tuple[[<ast.Name object at 0x7da1b2060400>, <ast.Name object at 0x7da1b2062b60>]]] in starred[name[data]] begin[:]
variable[image_filename] assign[=] call[name[image_info]][constant[0]]
variable[image_filepath] assign[=] call[name[os].path.join, parameter[name[tmp_dir], name[prefix], name[image_filename]]]
with call[name[tf].gfile.Open, parameter[name[image_filepath], constant[rb]]] begin[:]
variable[encoded_image_data] assign[=] call[name[f].read, parameter[]]
<ast.Tuple object at 0x7da1b2061b10> assign[=] tuple[[<ast.Subscript object at 0x7da1b20615d0>, <ast.Subscript object at 0x7da1b2060ca0>]]
for taget[name[label]] in starred[name[labels]] begin[:]
if <ast.BoolOp object at 0x7da1b20615a0> begin[:]
variable[label] assign[=] binary_operation[<ast.ListComp object at 0x7da1b2062740> + name[eos_list]]
<ast.Yield object at 0x7da1b2061ab0> | keyword[def] identifier[mscoco_generator] ( identifier[data_dir] ,
identifier[tmp_dir] ,
identifier[training] ,
identifier[how_many] ,
identifier[start_from] = literal[int] ,
identifier[eos_list] = keyword[None] ,
identifier[vocab_filename] = keyword[None] ):
literal[string]
identifier[eos_list] =[ literal[int] ] keyword[if] identifier[eos_list] keyword[is] keyword[None] keyword[else] identifier[eos_list]
keyword[def] identifier[get_vocab] ():
literal[string]
keyword[if] identifier[data_dir] keyword[is] keyword[not] keyword[None] keyword[and] identifier[vocab_filename] keyword[is] keyword[not] keyword[None] :
identifier[vocab_filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_dir] , identifier[vocab_filename] )
keyword[if] identifier[tf] . identifier[gfile] . identifier[Exists] ( identifier[vocab_filepath] ):
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] , identifier[vocab_filepath] )
identifier[vocab_symbolizer] = identifier[text_encoder] . identifier[SubwordTextEncoder] ( identifier[vocab_filepath] )
keyword[return] identifier[vocab_symbolizer]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[vocab_filepath] )
keyword[return] keyword[None]
identifier[vocab_symbolizer] = identifier[get_vocab] ()
identifier[_get_mscoco] ( identifier[tmp_dir] )
identifier[caption_filepath] =(
identifier[_MSCOCO_TRAIN_CAPTION_FILE] keyword[if] identifier[training] keyword[else] identifier[_MSCOCO_EVAL_CAPTION_FILE] )
identifier[caption_filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_dir] , identifier[caption_filepath] )
identifier[prefix] = identifier[_MSCOCO_TRAIN_PREFIX] keyword[if] identifier[training] keyword[else] identifier[_MSCOCO_EVAL_PREFIX]
identifier[caption_file] = identifier[io] . identifier[open] ( identifier[caption_filepath] )
identifier[caption_json] = identifier[json] . identifier[load] ( identifier[caption_file] )
identifier[image_dict] ={}
keyword[for] identifier[image] keyword[in] identifier[caption_json] [ literal[string] ]:
identifier[image_dict] [ identifier[image] [ literal[string] ]]=[( identifier[image] [ literal[string] ], identifier[image] [ literal[string] ],
identifier[image] [ literal[string] ]),[]]
identifier[annotations] = identifier[caption_json] [ literal[string] ]
identifier[annotation_count] = identifier[len] ( identifier[annotations] )
identifier[image_count] = identifier[len] ( identifier[image_dict] )
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] %( identifier[image_count] ,
identifier[annotation_count] ))
keyword[for] identifier[annotation] keyword[in] identifier[annotations] :
identifier[image_id] = identifier[annotation] [ literal[string] ]
identifier[image_dict] [ identifier[image_id] ][ literal[int] ]. identifier[append] ( identifier[annotation] [ literal[string] ])
identifier[data] = identifier[list] ( identifier[image_dict] . identifier[values] ())[ identifier[start_from] : identifier[start_from] + identifier[how_many] ]
identifier[random] . identifier[shuffle] ( identifier[data] )
keyword[for] identifier[image_info] , identifier[labels] keyword[in] identifier[data] :
identifier[image_filename] = identifier[image_info] [ literal[int] ]
identifier[image_filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_dir] , identifier[prefix] , identifier[image_filename] )
keyword[with] identifier[tf] . identifier[gfile] . identifier[Open] ( identifier[image_filepath] , literal[string] ) keyword[as] identifier[f] :
identifier[encoded_image_data] = identifier[f] . identifier[read] ()
identifier[height] , identifier[width] = identifier[image_info] [ literal[int] ], identifier[image_info] [ literal[int] ]
keyword[for] identifier[label] keyword[in] identifier[labels] :
keyword[if] identifier[vocab_filename] keyword[is] keyword[None] keyword[or] identifier[vocab_symbolizer] keyword[is] keyword[None] :
identifier[label] =[ identifier[ord] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[label] ]+ identifier[eos_list]
keyword[else] :
identifier[label] = identifier[vocab_symbolizer] . identifier[encode] ( identifier[label] )+ identifier[eos_list]
keyword[yield] {
literal[string] :[ identifier[encoded_image_data] ],
literal[string] :[ literal[string] ],
literal[string] : identifier[label] ,
literal[string] :[ identifier[height] ],
literal[string] :[ identifier[width] ]
} | def mscoco_generator(data_dir, tmp_dir, training, how_many, start_from=0, eos_list=None, vocab_filename=None):
"""Image generator for MSCOCO captioning problem with token-wise captions.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
eos_list: optional list of end of sentence tokens, otherwise use default
value `1`.
vocab_filename: file within `tmp_dir` to read vocabulary from.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as JPEG,
* image/format: the string "jpeg" representing image format,
* image/class/label: a list of integers representing the caption,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a list of the corresponding type.
"""
eos_list = [1] if eos_list is None else eos_list
def get_vocab():
"""Get vocab for caption text encoder."""
if data_dir is not None and vocab_filename is not None:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info('Found vocab file: %s', vocab_filepath)
vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)
return vocab_symbolizer # depends on [control=['if'], data=[]]
else:
raise ValueError('Vocab file does not exist: %s' % vocab_filepath) # depends on [control=['if'], data=[]]
return None
vocab_symbolizer = get_vocab()
_get_mscoco(tmp_dir)
caption_filepath = _MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE
caption_filepath = os.path.join(tmp_dir, caption_filepath)
prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX
caption_file = io.open(caption_filepath)
caption_json = json.load(caption_file)
# Dictionary from image_id to ((filename, height, width), captions).
image_dict = {}
for image in caption_json['images']:
image_dict[image['id']] = [(image['file_name'], image['height'], image['width']), []] # depends on [control=['for'], data=['image']]
annotations = caption_json['annotations']
annotation_count = len(annotations)
image_count = len(image_dict)
tf.logging.info('Processing %d images and %d labels\n' % (image_count, annotation_count))
for annotation in annotations:
image_id = annotation['image_id']
image_dict[image_id][1].append(annotation['caption']) # depends on [control=['for'], data=['annotation']]
data = list(image_dict.values())[start_from:start_from + how_many]
random.shuffle(data)
for (image_info, labels) in data:
image_filename = image_info[0]
image_filepath = os.path.join(tmp_dir, prefix, image_filename)
with tf.gfile.Open(image_filepath, 'rb') as f:
encoded_image_data = f.read()
(height, width) = (image_info[1], image_info[2])
for label in labels:
if vocab_filename is None or vocab_symbolizer is None:
label = [ord(c) for c in label] + eos_list # depends on [control=['if'], data=[]]
else:
label = vocab_symbolizer.encode(label) + eos_list
yield {'image/encoded': [encoded_image_data], 'image/format': ['jpeg'], 'image/class/label': label, 'image/height': [height], 'image/width': [width]} # depends on [control=['for'], data=['label']] # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=[]] |
def validate_matched_parenthesis(cls, format_target):
""" Adapted from https://stackoverflow.com/questions/6701853/parentheses-pairing-issue
:param format_order:
:return:
"""
iparens = iter('(){}[]<>')
parens = dict(zip(iparens, iparens))
closing = parens.values()
def balanced(astr):
stack = []
for c in astr:
d = parens.get(c, None)
if d:
stack.append(d)
elif c in closing:
if not stack or c != stack.pop():
return False
return not stack
if format_target:
if not balanced(format_target):
raise exceptions.BalanceError("Format string has unmatching parentheses.") | def function[validate_matched_parenthesis, parameter[cls, format_target]]:
constant[ Adapted from https://stackoverflow.com/questions/6701853/parentheses-pairing-issue
:param format_order:
:return:
]
variable[iparens] assign[=] call[name[iter], parameter[constant[(){}[]<>]]]
variable[parens] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[iparens], name[iparens]]]]]
variable[closing] assign[=] call[name[parens].values, parameter[]]
def function[balanced, parameter[astr]]:
variable[stack] assign[=] list[[]]
for taget[name[c]] in starred[name[astr]] begin[:]
variable[d] assign[=] call[name[parens].get, parameter[name[c], constant[None]]]
if name[d] begin[:]
call[name[stack].append, parameter[name[d]]]
return[<ast.UnaryOp object at 0x7da2044c0760>]
if name[format_target] begin[:]
if <ast.UnaryOp object at 0x7da2044c1b40> begin[:]
<ast.Raise object at 0x7da2044c2920> | keyword[def] identifier[validate_matched_parenthesis] ( identifier[cls] , identifier[format_target] ):
literal[string]
identifier[iparens] = identifier[iter] ( literal[string] )
identifier[parens] = identifier[dict] ( identifier[zip] ( identifier[iparens] , identifier[iparens] ))
identifier[closing] = identifier[parens] . identifier[values] ()
keyword[def] identifier[balanced] ( identifier[astr] ):
identifier[stack] =[]
keyword[for] identifier[c] keyword[in] identifier[astr] :
identifier[d] = identifier[parens] . identifier[get] ( identifier[c] , keyword[None] )
keyword[if] identifier[d] :
identifier[stack] . identifier[append] ( identifier[d] )
keyword[elif] identifier[c] keyword[in] identifier[closing] :
keyword[if] keyword[not] identifier[stack] keyword[or] identifier[c] != identifier[stack] . identifier[pop] ():
keyword[return] keyword[False]
keyword[return] keyword[not] identifier[stack]
keyword[if] identifier[format_target] :
keyword[if] keyword[not] identifier[balanced] ( identifier[format_target] ):
keyword[raise] identifier[exceptions] . identifier[BalanceError] ( literal[string] ) | def validate_matched_parenthesis(cls, format_target):
""" Adapted from https://stackoverflow.com/questions/6701853/parentheses-pairing-issue
:param format_order:
:return:
"""
iparens = iter('(){}[]<>')
parens = dict(zip(iparens, iparens))
closing = parens.values()
def balanced(astr):
stack = []
for c in astr:
d = parens.get(c, None)
if d:
stack.append(d) # depends on [control=['if'], data=[]]
elif c in closing:
if not stack or c != stack.pop():
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=['c']]
return not stack
if format_target:
if not balanced(format_target):
raise exceptions.BalanceError('Format string has unmatching parentheses.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def delete_config_input_target_config_target_startup_startup(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
delete_config = ET.Element("delete_config")
config = delete_config
input = ET.SubElement(delete_config, "input")
target = ET.SubElement(input, "target")
config_target = ET.SubElement(target, "config-target")
startup = ET.SubElement(config_target, "startup")
startup = ET.SubElement(startup, "startup")
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[delete_config_input_target_config_target_startup_startup, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[delete_config] assign[=] call[name[ET].Element, parameter[constant[delete_config]]]
variable[config] assign[=] name[delete_config]
variable[input] assign[=] call[name[ET].SubElement, parameter[name[delete_config], constant[input]]]
variable[target] assign[=] call[name[ET].SubElement, parameter[name[input], constant[target]]]
variable[config_target] assign[=] call[name[ET].SubElement, parameter[name[target], constant[config-target]]]
variable[startup] assign[=] call[name[ET].SubElement, parameter[name[config_target], constant[startup]]]
variable[startup] assign[=] call[name[ET].SubElement, parameter[name[startup], constant[startup]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[delete_config_input_target_config_target_startup_startup] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[delete_config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[delete_config]
identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[delete_config] , literal[string] )
identifier[target] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] )
identifier[config_target] = identifier[ET] . identifier[SubElement] ( identifier[target] , literal[string] )
identifier[startup] = identifier[ET] . identifier[SubElement] ( identifier[config_target] , literal[string] )
identifier[startup] = identifier[ET] . identifier[SubElement] ( identifier[startup] , literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def delete_config_input_target_config_target_startup_startup(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
delete_config = ET.Element('delete_config')
config = delete_config
input = ET.SubElement(delete_config, 'input')
target = ET.SubElement(input, 'target')
config_target = ET.SubElement(target, 'config-target')
startup = ET.SubElement(config_target, 'startup')
startup = ET.SubElement(startup, 'startup')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_time_inqueue(self):
"""
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note:
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
"""
if self.submission is None: return None
if self.start is None:
delta = datetime.datetime.now() - self.submission
else:
delta = self.start - self.submission
# This happens when we read the exact start datetime from the ABINIT log file.
if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)
return MyTimedelta.as_timedelta(delta) | def function[get_time_inqueue, parameter[self]]:
constant[
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note:
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
]
if compare[name[self].submission is constant[None]] begin[:]
return[constant[None]]
if compare[name[self].start is constant[None]] begin[:]
variable[delta] assign[=] binary_operation[call[name[datetime].datetime.now, parameter[]] - name[self].submission]
return[call[name[MyTimedelta].as_timedelta, parameter[name[delta]]]] | keyword[def] identifier[get_time_inqueue] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[submission] keyword[is] keyword[None] : keyword[return] keyword[None]
keyword[if] identifier[self] . identifier[start] keyword[is] keyword[None] :
identifier[delta] = identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[self] . identifier[submission]
keyword[else] :
identifier[delta] = identifier[self] . identifier[start] - identifier[self] . identifier[submission]
keyword[if] identifier[delta] . identifier[total_seconds] ()< literal[int] : identifier[delta] = identifier[datetime] . identifier[timedelta] ( identifier[seconds] = literal[int] )
keyword[return] identifier[MyTimedelta] . identifier[as_timedelta] ( identifier[delta] ) | def get_time_inqueue(self):
"""
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note:
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
"""
if self.submission is None:
return None # depends on [control=['if'], data=[]]
if self.start is None:
delta = datetime.datetime.now() - self.submission # depends on [control=['if'], data=[]]
else:
delta = self.start - self.submission
# This happens when we read the exact start datetime from the ABINIT log file.
if delta.total_seconds() < 0:
delta = datetime.timedelta(seconds=0) # depends on [control=['if'], data=[]]
return MyTimedelta.as_timedelta(delta) |
def apply_dict_depth_first(nodes, func, depth=0, as_dict=True, parents=None, pre=None, post=None):
'''
This function is similar to the `apply_depth_first` except that it operates
on the `OrderedDict`-based structure returned from `apply_depth_first` when
`as_dict=True`.
Note that if `as_dict` is `False`, the result of this function is given in
the entry/tuple form.
'''
if as_dict:
items = OrderedDict()
else:
items = []
if parents is None:
parents = []
node_count = len(nodes)
for i, (k, node) in enumerate(nodes.iteritems()):
first = (i == 0)
last = (i == (node_count - 1))
if pre is not None:
pre(k, node, parents, first, last, depth)
item = func(k, node, parents, first, last, depth)
item_parents = parents + [(k, node)]
if node.children is not None:
children = apply_dict_depth_first(node.children, func,
depth=depth + 1,
as_dict=as_dict,
parents=item_parents,
pre=pre, post=post)
else:
children = None
if post is not None:
post(k, node, parents, first, last, depth)
if as_dict:
items[k] = Node(item, children)
elif children:
items.append((item, children))
else:
items.append(item)
return items | def function[apply_dict_depth_first, parameter[nodes, func, depth, as_dict, parents, pre, post]]:
constant[
This function is similar to the `apply_depth_first` except that it operates
on the `OrderedDict`-based structure returned from `apply_depth_first` when
`as_dict=True`.
Note that if `as_dict` is `False`, the result of this function is given in
the entry/tuple form.
]
if name[as_dict] begin[:]
variable[items] assign[=] call[name[OrderedDict], parameter[]]
if compare[name[parents] is constant[None]] begin[:]
variable[parents] assign[=] list[[]]
variable[node_count] assign[=] call[name[len], parameter[name[nodes]]]
for taget[tuple[[<ast.Name object at 0x7da2047e8850>, <ast.Tuple object at 0x7da2047e8430>]]] in starred[call[name[enumerate], parameter[call[name[nodes].iteritems, parameter[]]]]] begin[:]
variable[first] assign[=] compare[name[i] equal[==] constant[0]]
variable[last] assign[=] compare[name[i] equal[==] binary_operation[name[node_count] - constant[1]]]
if compare[name[pre] is_not constant[None]] begin[:]
call[name[pre], parameter[name[k], name[node], name[parents], name[first], name[last], name[depth]]]
variable[item] assign[=] call[name[func], parameter[name[k], name[node], name[parents], name[first], name[last], name[depth]]]
variable[item_parents] assign[=] binary_operation[name[parents] + list[[<ast.Tuple object at 0x7da2047eb850>]]]
if compare[name[node].children is_not constant[None]] begin[:]
variable[children] assign[=] call[name[apply_dict_depth_first], parameter[name[node].children, name[func]]]
if compare[name[post] is_not constant[None]] begin[:]
call[name[post], parameter[name[k], name[node], name[parents], name[first], name[last], name[depth]]]
if name[as_dict] begin[:]
call[name[items]][name[k]] assign[=] call[name[Node], parameter[name[item], name[children]]]
return[name[items]] | keyword[def] identifier[apply_dict_depth_first] ( identifier[nodes] , identifier[func] , identifier[depth] = literal[int] , identifier[as_dict] = keyword[True] , identifier[parents] = keyword[None] , identifier[pre] = keyword[None] , identifier[post] = keyword[None] ):
literal[string]
keyword[if] identifier[as_dict] :
identifier[items] = identifier[OrderedDict] ()
keyword[else] :
identifier[items] =[]
keyword[if] identifier[parents] keyword[is] keyword[None] :
identifier[parents] =[]
identifier[node_count] = identifier[len] ( identifier[nodes] )
keyword[for] identifier[i] ,( identifier[k] , identifier[node] ) keyword[in] identifier[enumerate] ( identifier[nodes] . identifier[iteritems] ()):
identifier[first] =( identifier[i] == literal[int] )
identifier[last] =( identifier[i] ==( identifier[node_count] - literal[int] ))
keyword[if] identifier[pre] keyword[is] keyword[not] keyword[None] :
identifier[pre] ( identifier[k] , identifier[node] , identifier[parents] , identifier[first] , identifier[last] , identifier[depth] )
identifier[item] = identifier[func] ( identifier[k] , identifier[node] , identifier[parents] , identifier[first] , identifier[last] , identifier[depth] )
identifier[item_parents] = identifier[parents] +[( identifier[k] , identifier[node] )]
keyword[if] identifier[node] . identifier[children] keyword[is] keyword[not] keyword[None] :
identifier[children] = identifier[apply_dict_depth_first] ( identifier[node] . identifier[children] , identifier[func] ,
identifier[depth] = identifier[depth] + literal[int] ,
identifier[as_dict] = identifier[as_dict] ,
identifier[parents] = identifier[item_parents] ,
identifier[pre] = identifier[pre] , identifier[post] = identifier[post] )
keyword[else] :
identifier[children] = keyword[None]
keyword[if] identifier[post] keyword[is] keyword[not] keyword[None] :
identifier[post] ( identifier[k] , identifier[node] , identifier[parents] , identifier[first] , identifier[last] , identifier[depth] )
keyword[if] identifier[as_dict] :
identifier[items] [ identifier[k] ]= identifier[Node] ( identifier[item] , identifier[children] )
keyword[elif] identifier[children] :
identifier[items] . identifier[append] (( identifier[item] , identifier[children] ))
keyword[else] :
identifier[items] . identifier[append] ( identifier[item] )
keyword[return] identifier[items] | def apply_dict_depth_first(nodes, func, depth=0, as_dict=True, parents=None, pre=None, post=None):
"""
This function is similar to the `apply_depth_first` except that it operates
on the `OrderedDict`-based structure returned from `apply_depth_first` when
`as_dict=True`.
Note that if `as_dict` is `False`, the result of this function is given in
the entry/tuple form.
"""
if as_dict:
items = OrderedDict() # depends on [control=['if'], data=[]]
else:
items = []
if parents is None:
parents = [] # depends on [control=['if'], data=['parents']]
node_count = len(nodes)
for (i, (k, node)) in enumerate(nodes.iteritems()):
first = i == 0
last = i == node_count - 1
if pre is not None:
pre(k, node, parents, first, last, depth) # depends on [control=['if'], data=['pre']]
item = func(k, node, parents, first, last, depth)
item_parents = parents + [(k, node)]
if node.children is not None:
children = apply_dict_depth_first(node.children, func, depth=depth + 1, as_dict=as_dict, parents=item_parents, pre=pre, post=post) # depends on [control=['if'], data=[]]
else:
children = None
if post is not None:
post(k, node, parents, first, last, depth) # depends on [control=['if'], data=['post']]
if as_dict:
items[k] = Node(item, children) # depends on [control=['if'], data=[]]
elif children:
items.append((item, children)) # depends on [control=['if'], data=[]]
else:
items.append(item) # depends on [control=['for'], data=[]]
return items |
def count_trackbacks_handler(sender, **kwargs):
"""
Update Entry.trackback_count when a trackback was posted.
"""
entry = kwargs['entry']
entry.trackback_count = F('trackback_count') + 1
entry.save(update_fields=['trackback_count']) | def function[count_trackbacks_handler, parameter[sender]]:
constant[
Update Entry.trackback_count when a trackback was posted.
]
variable[entry] assign[=] call[name[kwargs]][constant[entry]]
name[entry].trackback_count assign[=] binary_operation[call[name[F], parameter[constant[trackback_count]]] + constant[1]]
call[name[entry].save, parameter[]] | keyword[def] identifier[count_trackbacks_handler] ( identifier[sender] ,** identifier[kwargs] ):
literal[string]
identifier[entry] = identifier[kwargs] [ literal[string] ]
identifier[entry] . identifier[trackback_count] = identifier[F] ( literal[string] )+ literal[int]
identifier[entry] . identifier[save] ( identifier[update_fields] =[ literal[string] ]) | def count_trackbacks_handler(sender, **kwargs):
"""
Update Entry.trackback_count when a trackback was posted.
"""
entry = kwargs['entry']
entry.trackback_count = F('trackback_count') + 1
entry.save(update_fields=['trackback_count']) |
def isTemporal(inferenceType):
""" Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
"""
if InferenceType.__temporalInferenceTypes is None:
InferenceType.__temporalInferenceTypes = \
set([InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep])
return inferenceType in InferenceType.__temporalInferenceTypes | def function[isTemporal, parameter[inferenceType]]:
constant[ Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
]
if compare[name[InferenceType].__temporalInferenceTypes is constant[None]] begin[:]
name[InferenceType].__temporalInferenceTypes assign[=] call[name[set], parameter[list[[<ast.Attribute object at 0x7da1b23476d0>, <ast.Attribute object at 0x7da1b2346230>, <ast.Attribute object at 0x7da1b2345300>, <ast.Attribute object at 0x7da1b2345c00>, <ast.Attribute object at 0x7da1b2344b20>]]]]
return[compare[name[inferenceType] in name[InferenceType].__temporalInferenceTypes]] | keyword[def] identifier[isTemporal] ( identifier[inferenceType] ):
literal[string]
keyword[if] identifier[InferenceType] . identifier[__temporalInferenceTypes] keyword[is] keyword[None] :
identifier[InferenceType] . identifier[__temporalInferenceTypes] = identifier[set] ([ identifier[InferenceType] . identifier[TemporalNextStep] ,
identifier[InferenceType] . identifier[TemporalClassification] ,
identifier[InferenceType] . identifier[TemporalAnomaly] ,
identifier[InferenceType] . identifier[TemporalMultiStep] ,
identifier[InferenceType] . identifier[NontemporalMultiStep] ])
keyword[return] identifier[inferenceType] keyword[in] identifier[InferenceType] . identifier[__temporalInferenceTypes] | def isTemporal(inferenceType):
""" Returns True if the inference type is 'temporal', i.e. requires a
temporal memory in the network.
"""
if InferenceType.__temporalInferenceTypes is None:
InferenceType.__temporalInferenceTypes = set([InferenceType.TemporalNextStep, InferenceType.TemporalClassification, InferenceType.TemporalAnomaly, InferenceType.TemporalMultiStep, InferenceType.NontemporalMultiStep]) # depends on [control=['if'], data=[]]
return inferenceType in InferenceType.__temporalInferenceTypes |
def accessible_organisms(user, orgs):
"""Get the list of organisms accessible to a user, filtered by `orgs`"""
permission_map = {
x['organism']: x['permissions']
for x in user.organismPermissions
if 'WRITE' in x['permissions'] or
'READ' in x['permissions'] or
'ADMINISTRATE' in x['permissions'] or
user.role == 'ADMIN'
}
if 'error' in orgs:
raise Exception("Error received from Apollo server: \"%s\"" % orgs['error'])
return [
(org['commonName'], org['id'], False)
for org in sorted(orgs, key=lambda x: x['commonName'])
if org['commonName'] in permission_map
] | def function[accessible_organisms, parameter[user, orgs]]:
constant[Get the list of organisms accessible to a user, filtered by `orgs`]
variable[permission_map] assign[=] <ast.DictComp object at 0x7da20c76e4a0>
if compare[constant[error] in name[orgs]] begin[:]
<ast.Raise object at 0x7da20c76f520>
return[<ast.ListComp object at 0x7da20c76d900>] | keyword[def] identifier[accessible_organisms] ( identifier[user] , identifier[orgs] ):
literal[string]
identifier[permission_map] ={
identifier[x] [ literal[string] ]: identifier[x] [ literal[string] ]
keyword[for] identifier[x] keyword[in] identifier[user] . identifier[organismPermissions]
keyword[if] literal[string] keyword[in] identifier[x] [ literal[string] ] keyword[or]
literal[string] keyword[in] identifier[x] [ literal[string] ] keyword[or]
literal[string] keyword[in] identifier[x] [ literal[string] ] keyword[or]
identifier[user] . identifier[role] == literal[string]
}
keyword[if] literal[string] keyword[in] identifier[orgs] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[orgs] [ literal[string] ])
keyword[return] [
( identifier[org] [ literal[string] ], identifier[org] [ literal[string] ], keyword[False] )
keyword[for] identifier[org] keyword[in] identifier[sorted] ( identifier[orgs] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[string] ])
keyword[if] identifier[org] [ literal[string] ] keyword[in] identifier[permission_map]
] | def accessible_organisms(user, orgs):
"""Get the list of organisms accessible to a user, filtered by `orgs`"""
permission_map = {x['organism']: x['permissions'] for x in user.organismPermissions if 'WRITE' in x['permissions'] or 'READ' in x['permissions'] or 'ADMINISTRATE' in x['permissions'] or (user.role == 'ADMIN')}
if 'error' in orgs:
raise Exception('Error received from Apollo server: "%s"' % orgs['error']) # depends on [control=['if'], data=['orgs']]
return [(org['commonName'], org['id'], False) for org in sorted(orgs, key=lambda x: x['commonName']) if org['commonName'] in permission_map] |
def _find_statistics(X, y, variogram_function,
variogram_model_parameters, coordinates_type):
"""Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
"""
delta = np.zeros(y.shape)
sigma = np.zeros(y.shape)
for i in range(y.shape[0]):
# skip the first value in the kriging problem
if i == 0:
continue
else:
k, ss = _krige(X[:i, :], y[:i], X[i, :], variogram_function,
variogram_model_parameters, coordinates_type)
# if the estimation error is zero, it's probably because
# the evaluation point X[i, :] is really close to one of the
# kriging system points in X[:i, :]...
# in the case of zero estimation error, the results are not stored
if np.absolute(ss) < eps:
continue
delta[i] = y[i] - k
sigma[i] = np.sqrt(ss)
# only use non-zero entries in these arrays... sigma is used to pull out
# non-zero entries in both cases because it is guaranteed to be positive,
# whereas delta can be either positive or negative
delta = delta[sigma > eps]
sigma = sigma[sigma > eps]
epsilon = delta/sigma
return delta, sigma, epsilon | def function[_find_statistics, parameter[X, y, variogram_function, variogram_model_parameters, coordinates_type]]:
constant[Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
]
variable[delta] assign[=] call[name[np].zeros, parameter[name[y].shape]]
variable[sigma] assign[=] call[name[np].zeros, parameter[name[y].shape]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[y].shape][constant[0]]]]] begin[:]
if compare[name[i] equal[==] constant[0]] begin[:]
continue
variable[delta] assign[=] call[name[delta]][compare[name[sigma] greater[>] name[eps]]]
variable[sigma] assign[=] call[name[sigma]][compare[name[sigma] greater[>] name[eps]]]
variable[epsilon] assign[=] binary_operation[name[delta] / name[sigma]]
return[tuple[[<ast.Name object at 0x7da2054a6e60>, <ast.Name object at 0x7da2054a4be0>, <ast.Name object at 0x7da2054a7c40>]]] | keyword[def] identifier[_find_statistics] ( identifier[X] , identifier[y] , identifier[variogram_function] ,
identifier[variogram_model_parameters] , identifier[coordinates_type] ):
literal[string]
identifier[delta] = identifier[np] . identifier[zeros] ( identifier[y] . identifier[shape] )
identifier[sigma] = identifier[np] . identifier[zeros] ( identifier[y] . identifier[shape] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[y] . identifier[shape] [ literal[int] ]):
keyword[if] identifier[i] == literal[int] :
keyword[continue]
keyword[else] :
identifier[k] , identifier[ss] = identifier[_krige] ( identifier[X] [: identifier[i] ,:], identifier[y] [: identifier[i] ], identifier[X] [ identifier[i] ,:], identifier[variogram_function] ,
identifier[variogram_model_parameters] , identifier[coordinates_type] )
keyword[if] identifier[np] . identifier[absolute] ( identifier[ss] )< identifier[eps] :
keyword[continue]
identifier[delta] [ identifier[i] ]= identifier[y] [ identifier[i] ]- identifier[k]
identifier[sigma] [ identifier[i] ]= identifier[np] . identifier[sqrt] ( identifier[ss] )
identifier[delta] = identifier[delta] [ identifier[sigma] > identifier[eps] ]
identifier[sigma] = identifier[sigma] [ identifier[sigma] > identifier[eps] ]
identifier[epsilon] = identifier[delta] / identifier[sigma]
keyword[return] identifier[delta] , identifier[sigma] , identifier[epsilon] | def _find_statistics(X, y, variogram_function, variogram_model_parameters, coordinates_type):
"""Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
"""
delta = np.zeros(y.shape)
sigma = np.zeros(y.shape)
for i in range(y.shape[0]):
# skip the first value in the kriging problem
if i == 0:
continue # depends on [control=['if'], data=[]]
else:
(k, ss) = _krige(X[:i, :], y[:i], X[i, :], variogram_function, variogram_model_parameters, coordinates_type)
# if the estimation error is zero, it's probably because
# the evaluation point X[i, :] is really close to one of the
# kriging system points in X[:i, :]...
# in the case of zero estimation error, the results are not stored
if np.absolute(ss) < eps:
continue # depends on [control=['if'], data=[]]
delta[i] = y[i] - k
sigma[i] = np.sqrt(ss) # depends on [control=['for'], data=['i']]
# only use non-zero entries in these arrays... sigma is used to pull out
# non-zero entries in both cases because it is guaranteed to be positive,
# whereas delta can be either positive or negative
delta = delta[sigma > eps]
sigma = sigma[sigma > eps]
epsilon = delta / sigma
return (delta, sigma, epsilon) |
def first(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a list of rows, and returns the first one in that
list.
"""
if not rows:
logger.warning("Trying to get first row from an empty list")
return []
return [rows[0]] | def function[first, parameter[self, rows]]:
constant[
Takes an expression that evaluates to a list of rows, and returns the first one in that
list.
]
if <ast.UnaryOp object at 0x7da18f811c30> begin[:]
call[name[logger].warning, parameter[constant[Trying to get first row from an empty list]]]
return[list[[]]]
return[list[[<ast.Subscript object at 0x7da18f811510>]]] | keyword[def] identifier[first] ( identifier[self] , identifier[rows] : identifier[List] [ identifier[Row] ])-> identifier[List] [ identifier[Row] ]:
literal[string]
keyword[if] keyword[not] identifier[rows] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] []
keyword[return] [ identifier[rows] [ literal[int] ]] | def first(self, rows: List[Row]) -> List[Row]:
"""
Takes an expression that evaluates to a list of rows, and returns the first one in that
list.
"""
if not rows:
logger.warning('Trying to get first row from an empty list')
return [] # depends on [control=['if'], data=[]]
return [rows[0]] |
def get_catalog_info(self, key=None, default=None):
"""
Get information from the catalog config file.
If *key* is `None`, return the full dict.
"""
if key is None:
return self._init_kwargs
return self._init_kwargs.get(key, default) | def function[get_catalog_info, parameter[self, key, default]]:
constant[
Get information from the catalog config file.
If *key* is `None`, return the full dict.
]
if compare[name[key] is constant[None]] begin[:]
return[name[self]._init_kwargs]
return[call[name[self]._init_kwargs.get, parameter[name[key], name[default]]]] | keyword[def] identifier[get_catalog_info] ( identifier[self] , identifier[key] = keyword[None] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[key] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_init_kwargs]
keyword[return] identifier[self] . identifier[_init_kwargs] . identifier[get] ( identifier[key] , identifier[default] ) | def get_catalog_info(self, key=None, default=None):
"""
Get information from the catalog config file.
If *key* is `None`, return the full dict.
"""
if key is None:
return self._init_kwargs # depends on [control=['if'], data=[]]
return self._init_kwargs.get(key, default) |
def get_molecule_hash(self, mol):
"""
Return inchi as molecular hash
"""
obmol = BabelMolAdaptor(mol).openbabel_mol
inchi = self._inchi_labels(obmol)[2]
return inchi | def function[get_molecule_hash, parameter[self, mol]]:
constant[
Return inchi as molecular hash
]
variable[obmol] assign[=] call[name[BabelMolAdaptor], parameter[name[mol]]].openbabel_mol
variable[inchi] assign[=] call[call[name[self]._inchi_labels, parameter[name[obmol]]]][constant[2]]
return[name[inchi]] | keyword[def] identifier[get_molecule_hash] ( identifier[self] , identifier[mol] ):
literal[string]
identifier[obmol] = identifier[BabelMolAdaptor] ( identifier[mol] ). identifier[openbabel_mol]
identifier[inchi] = identifier[self] . identifier[_inchi_labels] ( identifier[obmol] )[ literal[int] ]
keyword[return] identifier[inchi] | def get_molecule_hash(self, mol):
"""
Return inchi as molecular hash
"""
obmol = BabelMolAdaptor(mol).openbabel_mol
inchi = self._inchi_labels(obmol)[2]
return inchi |
def add_mandates(self, representative, rep_json):
'''
Create mandates from rep data based on variant configuration
'''
# Mandate in country group for party constituency
if rep_json.get('parti_ratt_financier'):
constituency, _ = Constituency.objects.get_or_create(
name=rep_json.get('parti_ratt_financier'), country=self.france)
group, _ = self.touch_model(model=Group,
abbreviation=self.france.code,
kind='country',
name=self.france.name)
_create_mandate(representative, group, constituency, 'membre')
# Configurable mandates
for mdef in self.variant['mandates']:
if mdef.get('chamber', False):
chamber = self.chamber
else:
chamber = None
if 'from' in mdef:
elems = mdef['from'](rep_json)
else:
elems = [rep_json]
for elem in elems:
name = _get_mdef_item(mdef, 'name', elem, '')
abbr = _get_mdef_item(mdef, 'abbr', elem, '')
group, _ = self.touch_model(model=Group,
abbreviation=abbr,
kind=mdef['kind'],
chamber=chamber,
name=name)
role = _get_mdef_item(mdef, 'role', elem, 'membre')
start = _get_mdef_item(mdef, 'start', elem, None)
if start is not None:
start = _parse_date(start)
end = _get_mdef_item(mdef, 'end', elem, None)
if end is not None:
end = _parse_date(end)
_create_mandate(representative, group, self.ch_constituency,
role, start, end)
logger.debug(
'%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'],
mdef['kind'], role, name, abbr, start, end)) | def function[add_mandates, parameter[self, representative, rep_json]]:
constant[
Create mandates from rep data based on variant configuration
]
if call[name[rep_json].get, parameter[constant[parti_ratt_financier]]] begin[:]
<ast.Tuple object at 0x7da18f09dc30> assign[=] call[name[Constituency].objects.get_or_create, parameter[]]
<ast.Tuple object at 0x7da18f09ff70> assign[=] call[name[self].touch_model, parameter[]]
call[name[_create_mandate], parameter[name[representative], name[group], name[constituency], constant[membre]]]
for taget[name[mdef]] in starred[call[name[self].variant][constant[mandates]]] begin[:]
if call[name[mdef].get, parameter[constant[chamber], constant[False]]] begin[:]
variable[chamber] assign[=] name[self].chamber
if compare[constant[from] in name[mdef]] begin[:]
variable[elems] assign[=] call[call[name[mdef]][constant[from]], parameter[name[rep_json]]]
for taget[name[elem]] in starred[name[elems]] begin[:]
variable[name] assign[=] call[name[_get_mdef_item], parameter[name[mdef], constant[name], name[elem], constant[]]]
variable[abbr] assign[=] call[name[_get_mdef_item], parameter[name[mdef], constant[abbr], name[elem], constant[]]]
<ast.Tuple object at 0x7da1b1621b70> assign[=] call[name[self].touch_model, parameter[]]
variable[role] assign[=] call[name[_get_mdef_item], parameter[name[mdef], constant[role], name[elem], constant[membre]]]
variable[start] assign[=] call[name[_get_mdef_item], parameter[name[mdef], constant[start], name[elem], constant[None]]]
if compare[name[start] is_not constant[None]] begin[:]
variable[start] assign[=] call[name[_parse_date], parameter[name[start]]]
variable[end] assign[=] call[name[_get_mdef_item], parameter[name[mdef], constant[end], name[elem], constant[None]]]
if compare[name[end] is_not constant[None]] begin[:]
variable[end] assign[=] call[name[_parse_date], parameter[name[end]]]
call[name[_create_mandate], parameter[name[representative], name[group], name[self].ch_constituency, name[role], name[start], name[end]]]
call[name[logger].debug, parameter[binary_operation[constant[%s => %s: %s of "%s" (%s) %s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b14708e0>, <ast.Subscript object at 0x7da1b14736d0>, <ast.Name object at 0x7da1b1470160>, <ast.Name object at 0x7da1b14704f0>, <ast.Name object at 0x7da1b1473100>, <ast.Name object at 0x7da1b14721d0>, <ast.Name object at 0x7da1b1473df0>]]]]] | keyword[def] identifier[add_mandates] ( identifier[self] , identifier[representative] , identifier[rep_json] ):
literal[string]
keyword[if] identifier[rep_json] . identifier[get] ( literal[string] ):
identifier[constituency] , identifier[_] = identifier[Constituency] . identifier[objects] . identifier[get_or_create] (
identifier[name] = identifier[rep_json] . identifier[get] ( literal[string] ), identifier[country] = identifier[self] . identifier[france] )
identifier[group] , identifier[_] = identifier[self] . identifier[touch_model] ( identifier[model] = identifier[Group] ,
identifier[abbreviation] = identifier[self] . identifier[france] . identifier[code] ,
identifier[kind] = literal[string] ,
identifier[name] = identifier[self] . identifier[france] . identifier[name] )
identifier[_create_mandate] ( identifier[representative] , identifier[group] , identifier[constituency] , literal[string] )
keyword[for] identifier[mdef] keyword[in] identifier[self] . identifier[variant] [ literal[string] ]:
keyword[if] identifier[mdef] . identifier[get] ( literal[string] , keyword[False] ):
identifier[chamber] = identifier[self] . identifier[chamber]
keyword[else] :
identifier[chamber] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[mdef] :
identifier[elems] = identifier[mdef] [ literal[string] ]( identifier[rep_json] )
keyword[else] :
identifier[elems] =[ identifier[rep_json] ]
keyword[for] identifier[elem] keyword[in] identifier[elems] :
identifier[name] = identifier[_get_mdef_item] ( identifier[mdef] , literal[string] , identifier[elem] , literal[string] )
identifier[abbr] = identifier[_get_mdef_item] ( identifier[mdef] , literal[string] , identifier[elem] , literal[string] )
identifier[group] , identifier[_] = identifier[self] . identifier[touch_model] ( identifier[model] = identifier[Group] ,
identifier[abbreviation] = identifier[abbr] ,
identifier[kind] = identifier[mdef] [ literal[string] ],
identifier[chamber] = identifier[chamber] ,
identifier[name] = identifier[name] )
identifier[role] = identifier[_get_mdef_item] ( identifier[mdef] , literal[string] , identifier[elem] , literal[string] )
identifier[start] = identifier[_get_mdef_item] ( identifier[mdef] , literal[string] , identifier[elem] , keyword[None] )
keyword[if] identifier[start] keyword[is] keyword[not] keyword[None] :
identifier[start] = identifier[_parse_date] ( identifier[start] )
identifier[end] = identifier[_get_mdef_item] ( identifier[mdef] , literal[string] , identifier[elem] , keyword[None] )
keyword[if] identifier[end] keyword[is] keyword[not] keyword[None] :
identifier[end] = identifier[_parse_date] ( identifier[end] )
identifier[_create_mandate] ( identifier[representative] , identifier[group] , identifier[self] . identifier[ch_constituency] ,
identifier[role] , identifier[start] , identifier[end] )
identifier[logger] . identifier[debug] (
literal[string] %( identifier[rep_json] [ literal[string] ],
identifier[mdef] [ literal[string] ], identifier[role] , identifier[name] , identifier[abbr] , identifier[start] , identifier[end] )) | def add_mandates(self, representative, rep_json):
"""
Create mandates from rep data based on variant configuration
"""
# Mandate in country group for party constituency
if rep_json.get('parti_ratt_financier'):
(constituency, _) = Constituency.objects.get_or_create(name=rep_json.get('parti_ratt_financier'), country=self.france)
(group, _) = self.touch_model(model=Group, abbreviation=self.france.code, kind='country', name=self.france.name)
_create_mandate(representative, group, constituency, 'membre') # depends on [control=['if'], data=[]]
# Configurable mandates
for mdef in self.variant['mandates']:
if mdef.get('chamber', False):
chamber = self.chamber # depends on [control=['if'], data=[]]
else:
chamber = None
if 'from' in mdef:
elems = mdef['from'](rep_json) # depends on [control=['if'], data=['mdef']]
else:
elems = [rep_json]
for elem in elems:
name = _get_mdef_item(mdef, 'name', elem, '')
abbr = _get_mdef_item(mdef, 'abbr', elem, '')
(group, _) = self.touch_model(model=Group, abbreviation=abbr, kind=mdef['kind'], chamber=chamber, name=name)
role = _get_mdef_item(mdef, 'role', elem, 'membre')
start = _get_mdef_item(mdef, 'start', elem, None)
if start is not None:
start = _parse_date(start) # depends on [control=['if'], data=['start']]
end = _get_mdef_item(mdef, 'end', elem, None)
if end is not None:
end = _parse_date(end) # depends on [control=['if'], data=['end']]
_create_mandate(representative, group, self.ch_constituency, role, start, end)
logger.debug('%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'], mdef['kind'], role, name, abbr, start, end)) # depends on [control=['for'], data=['elem']] # depends on [control=['for'], data=['mdef']] |
def experiments_fmri_create(self, experiment_id, filename):
    """Create functional data object from given file and associate the
    object with the specified experiment.

    The operation is rollback-safe: if the experiment disappears between
    the initial lookup and the association update, the freshly created
    fMRI object (files and database record) is deleted again.

    Parameters
    ----------
    experiment_id : string
        Unique experiment identifier
    filename : File-type object
        Functional data file
    Returns
    -------
    FMRIDataHandle
        Handle for created fMRI object or None if identified experiment
        is unknown
    """
    # Get the experiment to ensure that it exists before we even create the
    # functional data object
    experiment = self.experiments_get(experiment_id)
    if experiment is None:
        return None
    # Create functional data object from given file
    fmri = self.funcdata.create_object(filename)
    # Update experiment to associate it with created fMRI object. Assign
    # result to experiment. Should the experiment have been deleted in
    # parallel the result will be None
    experiment = self.experiments.update_fmri_data(experiment_id, fmri.identifier)
    if experiment is None:
        # Roll back: the experiment vanished concurrently, so remove the
        # object we just created.
        # Delete fMRI object's data directory
        shutil.rmtree(fmri.directory)
        # Delete functional data object from databases
        self.funcdata.delete_object(fmri.identifier, erase=True)
        return None
    else:
        # NOTE(review): this uses the module-level ``funcdata`` while the
        # lines above use ``self.funcdata`` -- presumably the handle class
        # lives in the ``funcdata`` module; confirm this is intentional.
        return funcdata.FMRIDataHandle(fmri, experiment_id) | def function[experiments_fmri_create, parameter[self, experiment_id, filename]]:
constant[Create functional data object from given file and associate the
object with the specified experiment.
Parameters
----------
experiment_id : string
Unique experiment identifier
filename : File-type object
Functional data file
Returns
-------
FMRIDataHandle
Handle for created fMRI object or None if identified experiment
is unknown
]
variable[experiment] assign[=] call[name[self].experiments_get, parameter[name[experiment_id]]]
if compare[name[experiment] is constant[None]] begin[:]
return[constant[None]]
variable[fmri] assign[=] call[name[self].funcdata.create_object, parameter[name[filename]]]
variable[experiment] assign[=] call[name[self].experiments.update_fmri_data, parameter[name[experiment_id], name[fmri].identifier]]
if compare[name[experiment] is constant[None]] begin[:]
call[name[shutil].rmtree, parameter[name[fmri].directory]]
call[name[self].funcdata.delete_object, parameter[name[fmri].identifier]]
return[constant[None]] | keyword[def] identifier[experiments_fmri_create] ( identifier[self] , identifier[experiment_id] , identifier[filename] ):
literal[string]
identifier[experiment] = identifier[self] . identifier[experiments_get] ( identifier[experiment_id] )
keyword[if] identifier[experiment] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[fmri] = identifier[self] . identifier[funcdata] . identifier[create_object] ( identifier[filename] )
identifier[experiment] = identifier[self] . identifier[experiments] . identifier[update_fmri_data] ( identifier[experiment_id] , identifier[fmri] . identifier[identifier] )
keyword[if] identifier[experiment] keyword[is] keyword[None] :
identifier[shutil] . identifier[rmtree] ( identifier[fmri] . identifier[directory] )
identifier[self] . identifier[funcdata] . identifier[delete_object] ( identifier[fmri] . identifier[identifier] , identifier[erase] = keyword[True] )
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[funcdata] . identifier[FMRIDataHandle] ( identifier[fmri] , identifier[experiment_id] ) | def experiments_fmri_create(self, experiment_id, filename):
"""Create functional data object from given file and associate the
object with the specified experiment.
Parameters
----------
experiment_id : string
Unique experiment identifier
filename : File-type object
Functional data file
Returns
-------
FMRIDataHandle
Handle for created fMRI object or None if identified experiment
is unknown
"""
# Get the experiment to ensure that it exist before we even create the
# functional data object
experiment = self.experiments_get(experiment_id)
if experiment is None:
return None # depends on [control=['if'], data=[]]
# Create functional data object from given file
fmri = self.funcdata.create_object(filename)
# Update experiment to associate it with created fMRI object. Assign
# result to experiment. Should the experiment have been deleted in
# parallel the result will be None
experiment = self.experiments.update_fmri_data(experiment_id, fmri.identifier)
if experiment is None:
# Delete fMRI object's data directory
shutil.rmtree(fmri.directory)
# Delete functional data object from databases
self.funcdata.delete_object(fmri.identifier, erase=True)
return None # depends on [control=['if'], data=[]]
else:
return funcdata.FMRIDataHandle(fmri, experiment_id) |
def load_data(path_dir):
    '''Load data, directory parameters, and accelerometer parameter names
    Args
    ----
    path_dir: str
        Path to the data directory
    Returns
    -------
    data: pandas.DataFrame
        Experiment data
    params_tag: dict
        A dictionary of parameters parsed from the directory name
    params_data: list
        A list of the accelerometer parameter names
    '''
    import os
    import pylleo
    # The experiment name is the last path component of the data directory;
    # tag model/ID and other parameters are encoded in that name.
    exp_name = os.path.split(path_dir)[1]
    params_tag = pylleo.utils.parse_experiment_params(exp_name)
    # Load the Little Leonardo tag data
    meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
                                   params_tag['tag_id'])
    # NOTE(review): ``sample_f`` is not defined in this function -- it is
    # presumably a module-level global (sampling frequency). Confirm, or
    # consider promoting it to a keyword parameter of this function.
    data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)
    # Get and curate the parameter names of the loaded dataframe
    params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
    params_data = [pylleo.utils.posix_string(p) for p in params_data]
    # Keep only accelerometer channels (names starting with 'acc').
    params_data = [p for p in params_data if p.startswith('acc')]
    return data, params_tag, params_data | def function[load_data, parameter[path_dir]]:
constant[Load data, directory parameters, and accelerometer parameter names
Args
----
path_dir: str
Path to the data directory
Returns
-------
data: pandas.DataFrame
Experiment data
params_tag: dict
A dictionary of parameters parsed from the directory name
params_data: list
A list of the accelerometer parameter names
]
import module[os]
import module[pylleo]
variable[exp_name] assign[=] call[call[name[os].path.split, parameter[name[path_dir]]]][constant[1]]
variable[params_tag] assign[=] call[name[pylleo].utils.parse_experiment_params, parameter[name[exp_name]]]
variable[meta] assign[=] call[name[pylleo].lleoio.read_meta, parameter[name[path_dir], call[name[params_tag]][constant[tag_model]], call[name[params_tag]][constant[tag_id]]]]
variable[data] assign[=] call[name[pylleo].lleoio.read_data, parameter[name[meta], name[path_dir]]]
variable[params_data] assign[=] call[name[pylleo].utils.get_tag_params, parameter[call[name[params_tag]][constant[tag_model]]]]
variable[params_data] assign[=] <ast.ListComp object at 0x7da1b168e890>
variable[params_data] assign[=] <ast.ListComp object at 0x7da1b168cca0>
return[tuple[[<ast.Name object at 0x7da1b168ceb0>, <ast.Name object at 0x7da1b168ec20>, <ast.Name object at 0x7da1b168d540>]]] | keyword[def] identifier[load_data] ( identifier[path_dir] ):
literal[string]
keyword[import] identifier[os]
keyword[import] identifier[pylleo]
identifier[exp_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[path_dir] )[ literal[int] ]
identifier[params_tag] = identifier[pylleo] . identifier[utils] . identifier[parse_experiment_params] ( identifier[exp_name] )
identifier[meta] = identifier[pylleo] . identifier[lleoio] . identifier[read_meta] ( identifier[path_dir] , identifier[params_tag] [ literal[string] ],
identifier[params_tag] [ literal[string] ])
identifier[data] = identifier[pylleo] . identifier[lleoio] . identifier[read_data] ( identifier[meta] , identifier[path_dir] , identifier[sample_f] = identifier[sample_f] )
identifier[params_data] = identifier[pylleo] . identifier[utils] . identifier[get_tag_params] ( identifier[params_tag] [ literal[string] ])
identifier[params_data] =[ identifier[pylleo] . identifier[utils] . identifier[posix_string] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[params_data] ]
identifier[params_data] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[params_data] keyword[if] identifier[p] . identifier[startswith] ( literal[string] )]
keyword[return] identifier[data] , identifier[params_tag] , identifier[params_data] | def load_data(path_dir):
"""Load data, directory parameters, and accelerometer parameter names
Args
----
path_dir: str
Path to the data directory
Returns
-------
data: pandas.DataFrame
Experiment data
params_tag: dict
A dictionary of parameters parsed from the directory name
params_data: list
A list of the accelerometer parameter names
"""
import os
import pylleo
exp_name = os.path.split(path_dir)[1]
params_tag = pylleo.utils.parse_experiment_params(exp_name)
# Load the Little Leonardo tag data
meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'], params_tag['tag_id'])
data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)
# Get and curate the parameter names of the loaded dataframe
params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
params_data = [pylleo.utils.posix_string(p) for p in params_data]
params_data = [p for p in params_data if p.startswith('acc')]
return (data, params_tag, params_data) |
def fastqIteratorSimple(fn, verbose=False, allowNameMissmatch=False):
  """
  A generator function that yields FastqSequence objects read from a
  fastq-format stream or filename. This is iterator requires that all
  sequence and quality data is provided on a single line -- put another way,
  it cannot parse fastq files with newline characters interspersed in the
  sequence and/or quality strings. That's probably okay though, as fastq
  files tend not to be formated like that (famous last words..).
  :param fn: filename or stream to read data from.
  :param allowNameMissmatch: don't throw error if name in sequence data
                             and quality data parts of a read don't match.
                             Newer version of CASVA seem to output data like
                             this, probably to save space.
                             (NOTE(review): the parameter name is misspelled
                             -- 'Missmatch' -- but is kept for backward
                             compatibility with existing callers.)
  :param verbose: if True, output additional status messages to stderr about
                  progress.
  :raises FastqFileFormatError: if EOF is hit mid-record, or if the '+' line
                                carries a name that differs from the '@' line
                                (unless allowNameMissmatch is set).
  """
  # Accept either an already-open stream or a filename string.
  fh = fn
  if type(fh).__name__ == "str":
    fh = open(fh)
  # try to get an idea of how much data we have...
  if verbose:
    try:
      totalLines = os.path.getsize(fh.name)
      pind = ProgressIndicator(totalToDo=totalLines,
                               messagePrefix="completed",
                               messageSuffix="of processing "
                               + fh.name)
    except AttributeError:
      # Streams without a .name (e.g. StringIO) can't report progress.
      sys.stderr.write("fastqIterator -- warning: " +
                       "unable to show progress for stream")
      verbose = False
  while True:
    # read four lines.. if we can't get four lines, something is wrong
    lines = []
    gotLines = 0
    while gotLines < 4:
      l = fh.readline()
      if verbose:
        # Progress is tracked by byte offset, not line count.
        pind.done = fh.tell()
        pind.showProgress()
      if l == "":
        # end of file found...
        if gotLines == 0:
          # ok, not in the middle of a sequence
          break
        else:
          raise FastqFileFormatError("reached end of file in the "
                                     + "middle of sequence data")
      l = l.strip()
      # Blank lines between records are tolerated and skipped.
      if l == "":
        continue
      lines.append(l)
      gotLines += 1
    # couldn't get any more data.. we're done
    if gotLines == 0:
      break
    # got our 4 lines, assemble our read..
    # first check that names match
    # lines[0] is the '@name' header and lines[2] is the '+name' separator;
    # [1:] strips the leading '@'/'+'. NOTE(review): a bare '+' separator
    # yields an empty name here and will trigger this error unless
    # allowNameMissmatch is True.
    if lines[0][1:] != lines[2][1:] and not allowNameMissmatch:
      raise FastqFileFormatError("names in sequence don't match : " +
                                 str(lines[0][1:]) + " != " +
                                 str(lines[2][1:]))
    name = lines[0][1:]
    seq = lines[1]
    qual = lines[3]
    yield NGSRead(seq, name, qual) | def function[fastqIteratorSimple, parameter[fn, verbose, allowNameMissmatch]]:
constant[
A generator function that yields FastqSequence objects read from a
fastq-format stream or filename. This is iterator requires that all
sequence and quality data is provided on a single line -- put another way,
it cannot parse fastq files with newline characters interspersed in the
sequence and/or quality strings. That's probably okay though, as fastq
files tend not to be formated like that (famous last words..).
:param fn: filename or stream to read data from.
:param allowNameMismatch: don't throw error if name in sequence data
and quality data parts of a read don't match.
Newer version of CASVA seem to output data like
this, probably to save space.
:param verbose: if True, output additional status messages to stderr about
progress.
]
variable[fh] assign[=] name[fn]
if compare[call[name[type], parameter[name[fh]]].__name__ equal[==] constant[str]] begin[:]
variable[fh] assign[=] call[name[open], parameter[name[fh]]]
if name[verbose] begin[:]
<ast.Try object at 0x7da1b1352170>
while constant[True] begin[:]
variable[lines] assign[=] list[[]]
variable[gotLines] assign[=] constant[0]
while compare[name[gotLines] less[<] constant[4]] begin[:]
variable[l] assign[=] call[name[fh].readline, parameter[]]
if name[verbose] begin[:]
name[pind].done assign[=] call[name[fh].tell, parameter[]]
call[name[pind].showProgress, parameter[]]
if compare[name[l] equal[==] constant[]] begin[:]
if compare[name[gotLines] equal[==] constant[0]] begin[:]
break
variable[l] assign[=] call[name[l].strip, parameter[]]
if compare[name[l] equal[==] constant[]] begin[:]
continue
call[name[lines].append, parameter[name[l]]]
<ast.AugAssign object at 0x7da1b1588970>
if compare[name[gotLines] equal[==] constant[0]] begin[:]
break
if <ast.BoolOp object at 0x7da1b1588130> begin[:]
<ast.Raise object at 0x7da1b1589c60>
variable[name] assign[=] call[call[name[lines]][constant[0]]][<ast.Slice object at 0x7da1b158a500>]
variable[seq] assign[=] call[name[lines]][constant[1]]
variable[qual] assign[=] call[name[lines]][constant[3]]
<ast.Yield object at 0x7da1b1522050> | keyword[def] identifier[fastqIteratorSimple] ( identifier[fn] , identifier[verbose] = keyword[False] , identifier[allowNameMissmatch] = keyword[False] ):
literal[string]
identifier[fh] = identifier[fn]
keyword[if] identifier[type] ( identifier[fh] ). identifier[__name__] == literal[string] :
identifier[fh] = identifier[open] ( identifier[fh] )
keyword[if] identifier[verbose] :
keyword[try] :
identifier[totalLines] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[fh] . identifier[name] )
identifier[pind] = identifier[ProgressIndicator] ( identifier[totalToDo] = identifier[totalLines] ,
identifier[messagePrefix] = literal[string] ,
identifier[messageSuffix] = literal[string]
+ identifier[fh] . identifier[name] )
keyword[except] identifier[AttributeError] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] +
literal[string] )
identifier[verbose] = keyword[False]
keyword[while] keyword[True] :
identifier[lines] =[]
identifier[gotLines] = literal[int]
keyword[while] identifier[gotLines] < literal[int] :
identifier[l] = identifier[fh] . identifier[readline] ()
keyword[if] identifier[verbose] :
identifier[pind] . identifier[done] = identifier[fh] . identifier[tell] ()
identifier[pind] . identifier[showProgress] ()
keyword[if] identifier[l] == literal[string] :
keyword[if] identifier[gotLines] == literal[int] :
keyword[break]
keyword[else] :
keyword[raise] identifier[FastqFileFormatError] ( literal[string]
+ literal[string] )
identifier[l] = identifier[l] . identifier[strip] ()
keyword[if] identifier[l] == literal[string] :
keyword[continue]
identifier[lines] . identifier[append] ( identifier[l] )
identifier[gotLines] += literal[int]
keyword[if] identifier[gotLines] == literal[int] :
keyword[break]
keyword[if] identifier[lines] [ literal[int] ][ literal[int] :]!= identifier[lines] [ literal[int] ][ literal[int] :] keyword[and] keyword[not] identifier[allowNameMissmatch] :
keyword[raise] identifier[FastqFileFormatError] ( literal[string] +
identifier[str] ( identifier[lines] [ literal[int] ][ literal[int] :])+ literal[string] +
identifier[str] ( identifier[lines] [ literal[int] ][ literal[int] :]))
identifier[name] = identifier[lines] [ literal[int] ][ literal[int] :]
identifier[seq] = identifier[lines] [ literal[int] ]
identifier[qual] = identifier[lines] [ literal[int] ]
keyword[yield] identifier[NGSRead] ( identifier[seq] , identifier[name] , identifier[qual] ) | def fastqIteratorSimple(fn, verbose=False, allowNameMissmatch=False):
"""
A generator function that yields FastqSequence objects read from a
fastq-format stream or filename. This is iterator requires that all
sequence and quality data is provided on a single line -- put another way,
it cannot parse fastq files with newline characters interspersed in the
sequence and/or quality strings. That's probably okay though, as fastq
files tend not to be formated like that (famous last words..).
:param fn: filename or stream to read data from.
:param allowNameMismatch: don't throw error if name in sequence data
and quality data parts of a read don't match.
Newer version of CASVA seem to output data like
this, probably to save space.
:param verbose: if True, output additional status messages to stderr about
progress.
"""
fh = fn
if type(fh).__name__ == 'str':
fh = open(fh) # depends on [control=['if'], data=[]]
# try to get an idea of how much data we have...
if verbose:
try:
totalLines = os.path.getsize(fh.name)
pind = ProgressIndicator(totalToDo=totalLines, messagePrefix='completed', messageSuffix='of processing ' + fh.name) # depends on [control=['try'], data=[]]
except AttributeError:
sys.stderr.write('fastqIterator -- warning: ' + 'unable to show progress for stream')
verbose = False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
while True:
# read four lines.. if we can't get four lines, something is wrong
lines = []
gotLines = 0
while gotLines < 4:
l = fh.readline()
if verbose:
pind.done = fh.tell()
pind.showProgress() # depends on [control=['if'], data=[]]
if l == '':
# end of file found...
if gotLines == 0:
# ok, not in the middle of a sequence
break # depends on [control=['if'], data=[]]
else:
raise FastqFileFormatError('reached end of file in the ' + 'middle of sequence data') # depends on [control=['if'], data=[]]
l = l.strip()
if l == '':
continue # depends on [control=['if'], data=[]]
lines.append(l)
gotLines += 1 # depends on [control=['while'], data=['gotLines']]
# couldn't get any more data.. we're done
if gotLines == 0:
break # depends on [control=['if'], data=[]]
# got our 4 lines, assemble our read..
# first check that names match
if lines[0][1:] != lines[2][1:] and (not allowNameMissmatch):
raise FastqFileFormatError("names in sequence don't match : " + str(lines[0][1:]) + ' != ' + str(lines[2][1:])) # depends on [control=['if'], data=[]]
name = lines[0][1:]
seq = lines[1]
qual = lines[3]
yield NGSRead(seq, name, qual) # depends on [control=['while'], data=[]] |
def status(self, info_or_id, download_dir=None):
"""
Return a constant describing the status of the given package
or collection. Status can be one of ``INSTALLED``,
``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
"""
if download_dir is None: download_dir = self._download_dir
info = self._info_or_id(info_or_id)
# Handle collections:
if isinstance(info, Collection):
pkg_status = [self.status(pkg.id) for pkg in info.packages]
if self.STALE in pkg_status:
return self.STALE
elif self.PARTIAL in pkg_status:
return self.PARTIAL
elif (self.INSTALLED in pkg_status and
self.NOT_INSTALLED in pkg_status):
return self.PARTIAL
elif self.NOT_INSTALLED in pkg_status:
return self.NOT_INSTALLED
else:
return self.INSTALLED
# Handle packages:
else:
filepath = os.path.join(download_dir, info.filename)
if download_dir != self._download_dir:
status = self._pkg_status(info, filepath)
else:
if info.id not in self._status_cache:
self._status_cache[info.id] = self._pkg_status(info,
filepath)
return self._status_cache[info.id] | def function[status, parameter[self, info_or_id, download_dir]]:
constant[
Return a constant describing the status of the given package
or collection. Status can be one of ``INSTALLED``,
``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
]
if compare[name[download_dir] is constant[None]] begin[:]
variable[download_dir] assign[=] name[self]._download_dir
variable[info] assign[=] call[name[self]._info_or_id, parameter[name[info_or_id]]]
if call[name[isinstance], parameter[name[info], name[Collection]]] begin[:]
variable[pkg_status] assign[=] <ast.ListComp object at 0x7da2044c3af0>
if compare[name[self].STALE in name[pkg_status]] begin[:]
return[name[self].STALE] | keyword[def] identifier[status] ( identifier[self] , identifier[info_or_id] , identifier[download_dir] = keyword[None] ):
literal[string]
keyword[if] identifier[download_dir] keyword[is] keyword[None] : identifier[download_dir] = identifier[self] . identifier[_download_dir]
identifier[info] = identifier[self] . identifier[_info_or_id] ( identifier[info_or_id] )
keyword[if] identifier[isinstance] ( identifier[info] , identifier[Collection] ):
identifier[pkg_status] =[ identifier[self] . identifier[status] ( identifier[pkg] . identifier[id] ) keyword[for] identifier[pkg] keyword[in] identifier[info] . identifier[packages] ]
keyword[if] identifier[self] . identifier[STALE] keyword[in] identifier[pkg_status] :
keyword[return] identifier[self] . identifier[STALE]
keyword[elif] identifier[self] . identifier[PARTIAL] keyword[in] identifier[pkg_status] :
keyword[return] identifier[self] . identifier[PARTIAL]
keyword[elif] ( identifier[self] . identifier[INSTALLED] keyword[in] identifier[pkg_status] keyword[and]
identifier[self] . identifier[NOT_INSTALLED] keyword[in] identifier[pkg_status] ):
keyword[return] identifier[self] . identifier[PARTIAL]
keyword[elif] identifier[self] . identifier[NOT_INSTALLED] keyword[in] identifier[pkg_status] :
keyword[return] identifier[self] . identifier[NOT_INSTALLED]
keyword[else] :
keyword[return] identifier[self] . identifier[INSTALLED]
keyword[else] :
identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[download_dir] , identifier[info] . identifier[filename] )
keyword[if] identifier[download_dir] != identifier[self] . identifier[_download_dir] :
identifier[status] = identifier[self] . identifier[_pkg_status] ( identifier[info] , identifier[filepath] )
keyword[else] :
keyword[if] identifier[info] . identifier[id] keyword[not] keyword[in] identifier[self] . identifier[_status_cache] :
identifier[self] . identifier[_status_cache] [ identifier[info] . identifier[id] ]= identifier[self] . identifier[_pkg_status] ( identifier[info] ,
identifier[filepath] )
keyword[return] identifier[self] . identifier[_status_cache] [ identifier[info] . identifier[id] ] | def status(self, info_or_id, download_dir=None):
"""
Return a constant describing the status of the given package
or collection. Status can be one of ``INSTALLED``,
``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
"""
if download_dir is None:
download_dir = self._download_dir # depends on [control=['if'], data=['download_dir']]
info = self._info_or_id(info_or_id)
# Handle collections:
if isinstance(info, Collection):
pkg_status = [self.status(pkg.id) for pkg in info.packages]
if self.STALE in pkg_status:
return self.STALE # depends on [control=['if'], data=[]]
elif self.PARTIAL in pkg_status:
return self.PARTIAL # depends on [control=['if'], data=[]]
elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
return self.PARTIAL # depends on [control=['if'], data=[]]
elif self.NOT_INSTALLED in pkg_status:
return self.NOT_INSTALLED # depends on [control=['if'], data=[]]
else:
return self.INSTALLED # depends on [control=['if'], data=[]]
else:
# Handle packages:
filepath = os.path.join(download_dir, info.filename)
if download_dir != self._download_dir:
status = self._pkg_status(info, filepath) # depends on [control=['if'], data=[]]
else:
if info.id not in self._status_cache:
self._status_cache[info.id] = self._pkg_status(info, filepath) # depends on [control=['if'], data=[]]
return self._status_cache[info.id] |
def get_notes(self, folderid="", offset=0, limit=10):
"""Fetch notes
:param folderid: The UUID of the folder to fetch notes from
:param offset: the pagination offset
:param limit: the pagination limit
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes', {
'folderid' : folderid,
'offset' : offset,
'limit' : limit
})
notes = []
for item in response['results']:
n = {}
n['noteid'] = item['noteid']
n['ts'] = item['ts']
n['unread'] = item['unread']
n['starred'] = item['starred']
n['sent'] = item['sent']
n['subject'] = item['subject']
n['preview'] = item['preview']
n['body'] = item['body']
n['user'] = User()
n['user'].from_dict(item['user'])
n['recipients'] = []
for recipient_item in item['recipients']:
u = User()
u.from_dict(recipient_item)
n['recipients'].append(u)
notes.append(n)
return {
"results" : notes,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} | def function[get_notes, parameter[self, folderid, offset, limit]]:
constant[Fetch notes
:param folderid: The UUID of the folder to fetch notes from
:param offset: the pagination offset
:param limit: the pagination limit
]
if compare[name[self].standard_grant_type is_not constant[authorization_code]] begin[:]
<ast.Raise object at 0x7da20c6e4c10>
variable[response] assign[=] call[name[self]._req, parameter[constant[/notes], dictionary[[<ast.Constant object at 0x7da20c6e5270>, <ast.Constant object at 0x7da20c6e4d90>, <ast.Constant object at 0x7da20c6e5120>], [<ast.Name object at 0x7da20c6e5150>, <ast.Name object at 0x7da20c6e5420>, <ast.Name object at 0x7da20c6e5300>]]]]
variable[notes] assign[=] list[[]]
for taget[name[item]] in starred[call[name[response]][constant[results]]] begin[:]
variable[n] assign[=] dictionary[[], []]
call[name[n]][constant[noteid]] assign[=] call[name[item]][constant[noteid]]
call[name[n]][constant[ts]] assign[=] call[name[item]][constant[ts]]
call[name[n]][constant[unread]] assign[=] call[name[item]][constant[unread]]
call[name[n]][constant[starred]] assign[=] call[name[item]][constant[starred]]
call[name[n]][constant[sent]] assign[=] call[name[item]][constant[sent]]
call[name[n]][constant[subject]] assign[=] call[name[item]][constant[subject]]
call[name[n]][constant[preview]] assign[=] call[name[item]][constant[preview]]
call[name[n]][constant[body]] assign[=] call[name[item]][constant[body]]
call[name[n]][constant[user]] assign[=] call[name[User], parameter[]]
call[call[name[n]][constant[user]].from_dict, parameter[call[name[item]][constant[user]]]]
call[name[n]][constant[recipients]] assign[=] list[[]]
for taget[name[recipient_item]] in starred[call[name[item]][constant[recipients]]] begin[:]
variable[u] assign[=] call[name[User], parameter[]]
call[name[u].from_dict, parameter[name[recipient_item]]]
call[call[name[n]][constant[recipients]].append, parameter[name[u]]]
call[name[notes].append, parameter[name[n]]]
return[dictionary[[<ast.Constant object at 0x7da18ede7130>, <ast.Constant object at 0x7da18ede7ee0>, <ast.Constant object at 0x7da18ede5840>], [<ast.Name object at 0x7da18ede5b10>, <ast.Subscript object at 0x7da18ede65f0>, <ast.Subscript object at 0x7da18ede4310>]]] | keyword[def] identifier[get_notes] ( identifier[self] , identifier[folderid] = literal[string] , identifier[offset] = literal[int] , identifier[limit] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[standard_grant_type] keyword[is] keyword[not] literal[string] :
keyword[raise] identifier[DeviantartError] ( literal[string] )
identifier[response] = identifier[self] . identifier[_req] ( literal[string] ,{
literal[string] : identifier[folderid] ,
literal[string] : identifier[offset] ,
literal[string] : identifier[limit]
})
identifier[notes] =[]
keyword[for] identifier[item] keyword[in] identifier[response] [ literal[string] ]:
identifier[n] ={}
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[item] [ literal[string] ]
identifier[n] [ literal[string] ]= identifier[User] ()
identifier[n] [ literal[string] ]. identifier[from_dict] ( identifier[item] [ literal[string] ])
identifier[n] [ literal[string] ]=[]
keyword[for] identifier[recipient_item] keyword[in] identifier[item] [ literal[string] ]:
identifier[u] = identifier[User] ()
identifier[u] . identifier[from_dict] ( identifier[recipient_item] )
identifier[n] [ literal[string] ]. identifier[append] ( identifier[u] )
identifier[notes] . identifier[append] ( identifier[n] )
keyword[return] {
literal[string] : identifier[notes] ,
literal[string] : identifier[response] [ literal[string] ],
literal[string] : identifier[response] [ literal[string] ]
} | def get_notes(self, folderid='', offset=0, limit=10):
"""Fetch notes
:param folderid: The UUID of the folder to fetch notes from
:param offset: the pagination offset
:param limit: the pagination limit
"""
if self.standard_grant_type is not 'authorization_code':
raise DeviantartError('Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.') # depends on [control=['if'], data=[]]
response = self._req('/notes', {'folderid': folderid, 'offset': offset, 'limit': limit})
notes = []
for item in response['results']:
n = {}
n['noteid'] = item['noteid']
n['ts'] = item['ts']
n['unread'] = item['unread']
n['starred'] = item['starred']
n['sent'] = item['sent']
n['subject'] = item['subject']
n['preview'] = item['preview']
n['body'] = item['body']
n['user'] = User()
n['user'].from_dict(item['user'])
n['recipients'] = []
for recipient_item in item['recipients']:
u = User()
u.from_dict(recipient_item)
n['recipients'].append(u) # depends on [control=['for'], data=['recipient_item']]
notes.append(n) # depends on [control=['for'], data=['item']]
return {'results': notes, 'has_more': response['has_more'], 'next_offset': response['next_offset']} |
def _make_name(current, new):
    '''
    Stops duplication between similarly named nested deploys, eg:
    Turn:
    Deploy Kubernetes master/Configure Kubernetes
    Into:
    Deploy Kubernetes master/Configure
    :param current: name of the enclosing (outer) deploy
    :param new: name of the nested (inner) deploy
    :returns: ``current/new`` with duplicated words removed from ``new``
    '''
    current_tokens = current.split()
    new_tokens = new.split()
    # Drop every whitespace-separated word of ``new`` that already appears
    # in ``current``; the order of the surviving words is preserved.
    new = ' '.join(
        new_token for new_token in new_tokens
        if new_token not in current_tokens
    )
    # Note: if every word is duplicated the result ends with a bare '/'.
    return '/'.join((current, new)) | def function[_make_name, parameter[current, new]]:
constant[
Stops duplication between similarly named nested deploys, eg:
Turn:
Deploy Kubernetes master/Configure Kubernetes
Into:
Deploy Kubernetes master/Configure
]
variable[current_tokens] assign[=] call[name[current].split, parameter[]]
variable[new_tokens] assign[=] call[name[new].split, parameter[]]
variable[new] assign[=] call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da18bc707c0>]]
return[call[constant[/].join, parameter[tuple[[<ast.Name object at 0x7da20c993df0>, <ast.Name object at 0x7da20c990070>]]]]] | keyword[def] identifier[_make_name] ( identifier[current] , identifier[new] ):
literal[string]
identifier[current_tokens] = identifier[current] . identifier[split] ()
identifier[new_tokens] = identifier[new] . identifier[split] ()
identifier[new] = literal[string] . identifier[join] (
identifier[new_token] keyword[for] identifier[new_token] keyword[in] identifier[new_tokens]
keyword[if] identifier[new_token] keyword[not] keyword[in] identifier[current_tokens]
)
keyword[return] literal[string] . identifier[join] (( identifier[current] , identifier[new] )) | def _make_name(current, new):
"""
Stops duplication between similarly named nested deploys, eg:
Turn:
Deploy Kubernetes master/Configure Kubernetes
Into:
Deploy Kubernetes master/Configure
"""
current_tokens = current.split()
new_tokens = new.split()
new = ' '.join((new_token for new_token in new_tokens if new_token not in current_tokens))
return '/'.join((current, new)) |
def is_ISS_above(self, latitude, longitude, altitude=None):
"""Location of the ISS regardin the current location.
:param latitude: latitude in degrees of location you want iss pass
above
:type latitude: float
:param longitude: longitude in degrees of location you want iss pass
above
:type longitude: float
:param altitude: altitude in meters of location you want iss pass
above, default is 100 when not given
:type altitude: float
:return: True if the ISS is above the location, False if not
:rtype: bool
"""
test = self.pass_times(latitude, longitude, altitude, 2)
# 2 results where asked so if API return only 1, that mean ISS is
# above the location
return len(test) is 1 | def function[is_ISS_above, parameter[self, latitude, longitude, altitude]]:
constant[Location of the ISS regardin the current location.
:param latitude: latitude in degrees of location you want iss pass
above
:type latitude: float
:param longitude: longitude in degrees of location you want iss pass
above
:type longitude: float
:param altitude: altitude in meters of location you want iss pass
above, default is 100 when not given
:type altitude: float
:return: True if the ISS is above the location, False if not
:rtype: bool
]
variable[test] assign[=] call[name[self].pass_times, parameter[name[latitude], name[longitude], name[altitude], constant[2]]]
return[compare[call[name[len], parameter[name[test]]] is constant[1]]] | keyword[def] identifier[is_ISS_above] ( identifier[self] , identifier[latitude] , identifier[longitude] , identifier[altitude] = keyword[None] ):
literal[string]
identifier[test] = identifier[self] . identifier[pass_times] ( identifier[latitude] , identifier[longitude] , identifier[altitude] , literal[int] )
keyword[return] identifier[len] ( identifier[test] ) keyword[is] literal[int] | def is_ISS_above(self, latitude, longitude, altitude=None):
"""Location of the ISS regardin the current location.
:param latitude: latitude in degrees of location you want iss pass
above
:type latitude: float
:param longitude: longitude in degrees of location you want iss pass
above
:type longitude: float
:param altitude: altitude in meters of location you want iss pass
above, default is 100 when not given
:type altitude: float
:return: True if the ISS is above the location, False if not
:rtype: bool
"""
test = self.pass_times(latitude, longitude, altitude, 2)
# 2 results where asked so if API return only 1, that mean ISS is
# above the location
return len(test) is 1 |
def post_event(self, id, **data):
    """
    POST /events/:id/
    Updates an event. Returns an :format:`event` for the specified event. Does not support updating a repeating event
    series parent (see POST /series/:id/).
    :param id: event id, interpolated into the request path
    :param data: arbitrary keyword arguments forwarded as the POST body
    """
    # NOTE: 'id' shadows the builtin but is kept for API compatibility.
    return self.post("/events/{0}/".format(id), data=data) | def function[post_event, parameter[self, id]]:
constant[
POST /events/:id/
Updates an event. Returns an :format:`event` for the specified event. Does not support updating a repeating event
series parent (see POST /series/:id/).
]
return[call[name[self].post, parameter[call[constant[/events/{0}/].format, parameter[name[id]]]]]] | keyword[def] identifier[post_event] ( identifier[self] , identifier[id] ,** identifier[data] ):
literal[string]
keyword[return] identifier[self] . identifier[post] ( literal[string] . identifier[format] ( identifier[id] ), identifier[data] = identifier[data] ) | def post_event(self, id, **data):
"""
POST /events/:id/
Updates an event. Returns an :format:`event` for the specified event. Does not support updating a repeating event
series parent (see POST /series/:id/).
"""
return self.post('/events/{0}/'.format(id), data=data) |
def new(self):
    # type: () -> None
    '''
    A method to create a new UDF Partition Map.
    Parameters:
     None.
    Returns:
     Nothing.
    Raises:
     PyCdlibInternalError: if this object was already initialized.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Partition Map already initialized')
    # Partition number within the UDF volume; currently hard-coded.
    self.part_num = 0 # FIXME: we should let the user set this
    self._initialized = True | def function[new, parameter[self]]:
constant[
A method to create a new UDF Partition Map.
Parameters:
None.
Returns:
Nothing.
]
if name[self]._initialized begin[:]
<ast.Raise object at 0x7da1b0f0e2c0>
name[self].part_num assign[=] constant[0]
name[self]._initialized assign[=] constant[True] | keyword[def] identifier[new] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
identifier[self] . identifier[part_num] = literal[int]
identifier[self] . identifier[_initialized] = keyword[True] | def new(self):
# type: () -> None
'\n A method to create a new UDF Partition Map.\n\n Parameters:\n None.\n Returns:\n Nothing.\n '
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Map already initialized') # depends on [control=['if'], data=[]]
self.part_num = 0 # FIXME: we should let the user set this
self._initialized = True |
def get_object(cls, api_token, id):
    """
    Class method that will return a LoadBalancer object by its ID.
    Args:
        api_token (str): DigitalOcean API token
        id (str): Load Balancer ID
    Returns:
        An instance of ``cls`` populated via its ``load()`` call.
    """
    load_balancer = cls(token=api_token, id=id)
    # Fetch the balancer's attributes from the API before returning.
    load_balancer.load()
    return load_balancer | def function[get_object, parameter[cls, api_token, id]]:
constant[
Class method that will return a LoadBalancer object by its ID.
Args:
api_token (str): DigitalOcean API token
id (str): Load Balancer ID
]
variable[load_balancer] assign[=] call[name[cls], parameter[]]
call[name[load_balancer].load, parameter[]]
return[name[load_balancer]] | keyword[def] identifier[get_object] ( identifier[cls] , identifier[api_token] , identifier[id] ):
literal[string]
identifier[load_balancer] = identifier[cls] ( identifier[token] = identifier[api_token] , identifier[id] = identifier[id] )
identifier[load_balancer] . identifier[load] ()
keyword[return] identifier[load_balancer] | def get_object(cls, api_token, id):
"""
Class method that will return a LoadBalancer object by its ID.
Args:
api_token (str): DigitalOcean API token
id (str): Load Balancer ID
"""
load_balancer = cls(token=api_token, id=id)
load_balancer.load()
return load_balancer |
def _check_multi_statement_line(self, node, line):
    """Check for lines containing multiple statements.

    Emits 'multiple-statements' unless the construct is one of the
    whitelisted single-line forms handled below.
    """
    # Do not warn about multiple nested context managers
    # in with statements.
    if isinstance(node, nodes.With):
        return
    # For try... except... finally..., the two nodes
    # appear to be on the same line due to how the AST is built.
    if isinstance(node, nodes.TryExcept) and isinstance(
        node.parent, nodes.TryFinally
    ):
        return
    # A single-line 'if' without an 'else' branch is allowed when the
    # single_line_if_stmt option is enabled.
    if (
        isinstance(node.parent, nodes.If)
        and not node.parent.orelse
        and self.config.single_line_if_stmt
    ):
        return
    # Likewise for a one-statement class body when
    # single_line_class_stmt is enabled.
    if (
        isinstance(node.parent, nodes.ClassDef)
        and len(node.parent.body) == 1
        and self.config.single_line_class_stmt
    ):
        return
    self.add_message("multiple-statements", node=node)
    # Mark the line as fully reported so it is not flagged again.
    self._visited_lines[line] = 2 | def function[_check_multi_statement_line, parameter[self, node, line]]:
constant[Check for lines containing multiple statements.]
if call[name[isinstance], parameter[name[node], name[nodes].With]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b0314730> begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b0315c90> begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b03172e0> begin[:]
return[None]
call[name[self].add_message, parameter[constant[multiple-statements]]]
call[name[self]._visited_lines][name[line]] assign[=] constant[2] | keyword[def] identifier[_check_multi_statement_line] ( identifier[self] , identifier[node] , identifier[line] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[node] , identifier[nodes] . identifier[With] ):
keyword[return]
keyword[if] identifier[isinstance] ( identifier[node] , identifier[nodes] . identifier[TryExcept] ) keyword[and] identifier[isinstance] (
identifier[node] . identifier[parent] , identifier[nodes] . identifier[TryFinally]
):
keyword[return]
keyword[if] (
identifier[isinstance] ( identifier[node] . identifier[parent] , identifier[nodes] . identifier[If] )
keyword[and] keyword[not] identifier[node] . identifier[parent] . identifier[orelse]
keyword[and] identifier[self] . identifier[config] . identifier[single_line_if_stmt]
):
keyword[return]
keyword[if] (
identifier[isinstance] ( identifier[node] . identifier[parent] , identifier[nodes] . identifier[ClassDef] )
keyword[and] identifier[len] ( identifier[node] . identifier[parent] . identifier[body] )== literal[int]
keyword[and] identifier[self] . identifier[config] . identifier[single_line_class_stmt]
):
keyword[return]
identifier[self] . identifier[add_message] ( literal[string] , identifier[node] = identifier[node] )
identifier[self] . identifier[_visited_lines] [ identifier[line] ]= literal[int] | def _check_multi_statement_line(self, node, line):
"""Check for lines containing multiple statements."""
# Do not warn about multiple nested context managers
# in with statements.
if isinstance(node, nodes.With):
return # depends on [control=['if'], data=[]]
# For try... except... finally..., the two nodes
# appear to be on the same line due to how the AST is built.
if isinstance(node, nodes.TryExcept) and isinstance(node.parent, nodes.TryFinally):
return # depends on [control=['if'], data=[]]
if isinstance(node.parent, nodes.If) and (not node.parent.orelse) and self.config.single_line_if_stmt:
return # depends on [control=['if'], data=[]]
if isinstance(node.parent, nodes.ClassDef) and len(node.parent.body) == 1 and self.config.single_line_class_stmt:
return # depends on [control=['if'], data=[]]
self.add_message('multiple-statements', node=node)
self._visited_lines[line] = 2 |
def stop_transmit(self, fd):
    """
    Stop yielding writeability events for `fd`.
    Redundant calls to :meth:`stop_transmit` are silently ignored, this may
    change in future.
    """
    # pop() with a default makes removal a no-op for unregistered fds.
    self._wfds.pop(fd, None)
    # Re-sync the underlying poller with the updated descriptor set.
    self._update(fd) | def function[stop_transmit, parameter[self, fd]]:
constant[
Stop yielding writeability events for `fd`.
Redundant calls to :meth:`stop_transmit` are silently ignored, this may
change in future.
]
call[name[self]._wfds.pop, parameter[name[fd], constant[None]]]
call[name[self]._update, parameter[name[fd]]] | keyword[def] identifier[stop_transmit] ( identifier[self] , identifier[fd] ):
literal[string]
identifier[self] . identifier[_wfds] . identifier[pop] ( identifier[fd] , keyword[None] )
identifier[self] . identifier[_update] ( identifier[fd] ) | def stop_transmit(self, fd):
"""
Stop yielding writeability events for `fd`.
Redundant calls to :meth:`stop_transmit` are silently ignored, this may
change in future.
"""
self._wfds.pop(fd, None)
self._update(fd) |
def get_commands(self, mission_xml, role):
    """Get commands from xml string as a list of (command_type:int, turnbased:boolean, command:string)"""
    # Parse the XML text into an element tree, then delegate extraction.
    mission = etree.fromstring(mission_xml)
    return self.get_commands_from_xml(mission, role) | def function[get_commands, parameter[self, mission_xml, role]]:
constant[Get commands from xml string as a list of (command_type:int, turnbased:boolean, command:string)]
variable[mission] assign[=] call[name[etree].fromstring, parameter[name[mission_xml]]]
return[call[name[self].get_commands_from_xml, parameter[name[mission], name[role]]]] | keyword[def] identifier[get_commands] ( identifier[self] , identifier[mission_xml] , identifier[role] ):
literal[string]
identifier[mission] = identifier[etree] . identifier[fromstring] ( identifier[mission_xml] )
keyword[return] identifier[self] . identifier[get_commands_from_xml] ( identifier[mission] , identifier[role] ) | def get_commands(self, mission_xml, role):
"""Get commands from xml string as a list of (command_type:int, turnbased:boolean, command:string)"""
mission = etree.fromstring(mission_xml)
return self.get_commands_from_xml(mission, role) |
def _cont_norm_gaussian_smooth(dataset, L):
    """ Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum
    Parameters
    ----------
    dataset: Dataset
        the dataset to continuum normalize
    L: float
        the width of the Gaussian used for weighting
    Returns
    -------
    norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar
        normalized flux and inverse variance for the training and test
        sets. NOTE(review): the original docstring claimed a Dataset is
        returned, but the code returns this 4-tuple.
    """
    print("Gaussian smoothing the entire dataset...")
    # The weight matrix depends only on wavelengths, so it is computed
    # once and shared by both smoothing passes below.
    w = gaussian_weight_matrix(dataset.wl, L)
    print("Gaussian smoothing the training set")
    cont = _find_cont_gaussian_smooth(
        dataset.wl, dataset.tr_flux, dataset.tr_ivar, w)
    norm_tr_flux, norm_tr_ivar = _cont_norm(
        dataset.tr_flux, dataset.tr_ivar, cont)
    print("Gaussian smoothing the test set")
    # 'cont' is rebound here with the test-set continuum estimate.
    cont = _find_cont_gaussian_smooth(
        dataset.wl, dataset.test_flux, dataset.test_ivar, w)
    norm_test_flux, norm_test_ivar = _cont_norm(
        dataset.test_flux, dataset.test_ivar, cont)
    return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar | def function[_cont_norm_gaussian_smooth, parameter[dataset, L]]:
constant[ Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum
Parameters
----------
dataset: Dataset
the dataset to continuum normalize
L: float
the width of the Gaussian used for weighting
Returns
-------
dataset: Dataset
updated dataset
]
call[name[print], parameter[constant[Gaussian smoothing the entire dataset...]]]
variable[w] assign[=] call[name[gaussian_weight_matrix], parameter[name[dataset].wl, name[L]]]
call[name[print], parameter[constant[Gaussian smoothing the training set]]]
variable[cont] assign[=] call[name[_find_cont_gaussian_smooth], parameter[name[dataset].wl, name[dataset].tr_flux, name[dataset].tr_ivar, name[w]]]
<ast.Tuple object at 0x7da207f01180> assign[=] call[name[_cont_norm], parameter[name[dataset].tr_flux, name[dataset].tr_ivar, name[cont]]]
call[name[print], parameter[constant[Gaussian smoothing the test set]]]
variable[cont] assign[=] call[name[_find_cont_gaussian_smooth], parameter[name[dataset].wl, name[dataset].test_flux, name[dataset].test_ivar, name[w]]]
<ast.Tuple object at 0x7da2041d9360> assign[=] call[name[_cont_norm], parameter[name[dataset].test_flux, name[dataset].test_ivar, name[cont]]]
return[tuple[[<ast.Name object at 0x7da1b10c1810>, <ast.Name object at 0x7da1b10c36d0>, <ast.Name object at 0x7da1b10c1d50>, <ast.Name object at 0x7da1b10c25c0>]]] | keyword[def] identifier[_cont_norm_gaussian_smooth] ( identifier[dataset] , identifier[L] ):
literal[string]
identifier[print] ( literal[string] )
identifier[w] = identifier[gaussian_weight_matrix] ( identifier[dataset] . identifier[wl] , identifier[L] )
identifier[print] ( literal[string] )
identifier[cont] = identifier[_find_cont_gaussian_smooth] (
identifier[dataset] . identifier[wl] , identifier[dataset] . identifier[tr_flux] , identifier[dataset] . identifier[tr_ivar] , identifier[w] )
identifier[norm_tr_flux] , identifier[norm_tr_ivar] = identifier[_cont_norm] (
identifier[dataset] . identifier[tr_flux] , identifier[dataset] . identifier[tr_ivar] , identifier[cont] )
identifier[print] ( literal[string] )
identifier[cont] = identifier[_find_cont_gaussian_smooth] (
identifier[dataset] . identifier[wl] , identifier[dataset] . identifier[test_flux] , identifier[dataset] . identifier[test_ivar] , identifier[w] )
identifier[norm_test_flux] , identifier[norm_test_ivar] = identifier[_cont_norm] (
identifier[dataset] . identifier[test_flux] , identifier[dataset] . identifier[test_ivar] , identifier[cont] )
keyword[return] identifier[norm_tr_flux] , identifier[norm_tr_ivar] , identifier[norm_test_flux] , identifier[norm_test_ivar] | def _cont_norm_gaussian_smooth(dataset, L):
""" Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum
Parameters
----------
dataset: Dataset
the dataset to continuum normalize
L: float
the width of the Gaussian used for weighting
Returns
-------
dataset: Dataset
updated dataset
"""
print('Gaussian smoothing the entire dataset...')
w = gaussian_weight_matrix(dataset.wl, L)
print('Gaussian smoothing the training set')
cont = _find_cont_gaussian_smooth(dataset.wl, dataset.tr_flux, dataset.tr_ivar, w)
(norm_tr_flux, norm_tr_ivar) = _cont_norm(dataset.tr_flux, dataset.tr_ivar, cont)
print('Gaussian smoothing the test set')
cont = _find_cont_gaussian_smooth(dataset.wl, dataset.test_flux, dataset.test_ivar, w)
(norm_test_flux, norm_test_ivar) = _cont_norm(dataset.test_flux, dataset.test_ivar, cont)
return (norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar) |
def Any(*validators):
    """
    Combines all the given validator callables into one, running the given
    value through them in sequence until a valid result is given.
    If every validator raises Error, the last such Error is re-raised.
    NOTE(review): the name shadows typing.Any when both are imported.
    """
    @wraps(Any)
    def built(value):
        error = None
        # Try each validator in order; the first success wins.
        for validator in validators:
            try:
                return validator(value)
            except Error as e:
                error = e
        # All validators failed: re-raise the most recent Error.
        raise error
    return built | def function[Any, parameter[]]:
constant[
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
]
def function[built, parameter[value]]:
variable[error] assign[=] constant[None]
for taget[name[validator]] in starred[name[validators]] begin[:]
<ast.Try object at 0x7da1b26ad540>
<ast.Raise object at 0x7da1b26ac1f0>
return[name[built]] | keyword[def] identifier[Any] (* identifier[validators] ):
literal[string]
@ identifier[wraps] ( identifier[Any] )
keyword[def] identifier[built] ( identifier[value] ):
identifier[error] = keyword[None]
keyword[for] identifier[validator] keyword[in] identifier[validators] :
keyword[try] :
keyword[return] identifier[validator] ( identifier[value] )
keyword[except] identifier[Error] keyword[as] identifier[e] :
identifier[error] = identifier[e]
keyword[raise] identifier[error]
keyword[return] identifier[built] | def Any(*validators):
"""
Combines all the given validator callables into one, running the given
value through them in sequence until a valid result is given.
"""
@wraps(Any)
def built(value):
error = None
for validator in validators:
try:
return validator(value) # depends on [control=['try'], data=[]]
except Error as e:
error = e # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['validator']]
raise error
return built |
def decorate_event_js(js_code):
    """setup a method as an event, adding also javascript code to generate
    Args:
        js_code (str): javascript code to generate the event client-side.
            js_code is added to the widget html as
            widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
    Returns:
        The decorator that annotates a method as an event handler.
    """
    def add_annotation(method):
        # Mark the method so the framework recognises it as an event
        # handler and stores the client-side JS template alongside it.
        setattr(method, "__is_event", True )
        setattr(method, "_js_code", js_code )
        return method
    return add_annotation | def function[decorate_event_js, parameter[js_code]]:
constant[setup a method as an event, adding also javascript code to generate
Args:
js_code (str): javascript code to generate the event client-side.
js_code is added to the widget html as
widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
]
def function[add_annotation, parameter[method]]:
call[name[setattr], parameter[name[method], constant[__is_event], constant[True]]]
call[name[setattr], parameter[name[method], constant[_js_code], name[js_code]]]
return[name[method]]
return[name[add_annotation]] | keyword[def] identifier[decorate_event_js] ( identifier[js_code] ):
literal[string]
keyword[def] identifier[add_annotation] ( identifier[method] ):
identifier[setattr] ( identifier[method] , literal[string] , keyword[True] )
identifier[setattr] ( identifier[method] , literal[string] , identifier[js_code] )
keyword[return] identifier[method]
keyword[return] identifier[add_annotation] | def decorate_event_js(js_code):
"""setup a method as an event, adding also javascript code to generate
Args:
js_code (str): javascript code to generate the event client-side.
js_code is added to the widget html as
widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'}
"""
def add_annotation(method):
setattr(method, '__is_event', True)
setattr(method, '_js_code', js_code)
return method
return add_annotation |
def _update_request_uri_query(self, request):
    '''pulls the query string out of the URI and moves it into
    the query portion of the request object. If there are already
    query parameters on the request the parameters in the URI will
    appear after the existing parameters'''
    if '?' in request.path:
        # Split off everything after the first '?' and fold each
        # name=value pair into request.query.
        request.path, _, query_string = request.path.partition('?')
        if query_string:
            query_params = query_string.split('&')
            for query in query_params:
                if '=' in query:
                    name, _, value = query.partition('=')
                    request.query.append((name, value))
    # Percent-encode the path, keeping URL-significant characters intact.
    request.path = url_quote(request.path, '/()$=\',')
    # add encoded queries to request.path.
    if request.query:
        request.path += '?'
        for name, value in request.query:
            if value is not None:
                request.path += name + '=' + url_quote(value, '/()$=\',') + '&'
        # Trim the trailing '&' left by the loop above.
        request.path = request.path[:-1]
    return request.path, request.query | def function[_update_request_uri_query, parameter[self, request]]:
constant[pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters]
if compare[constant[?] in name[request].path] begin[:]
<ast.Tuple object at 0x7da2045641c0> assign[=] call[name[request].path.partition, parameter[constant[?]]]
if name[query_string] begin[:]
variable[query_params] assign[=] call[name[query_string].split, parameter[constant[&]]]
for taget[name[query]] in starred[name[query_params]] begin[:]
if compare[constant[=] in name[query]] begin[:]
<ast.Tuple object at 0x7da204564c70> assign[=] call[name[query].partition, parameter[constant[=]]]
call[name[request].query.append, parameter[tuple[[<ast.Name object at 0x7da1b0355480>, <ast.Name object at 0x7da1b0356e00>]]]]
name[request].path assign[=] call[name[url_quote], parameter[name[request].path, constant[/()$=',]]]
if name[request].query begin[:]
<ast.AugAssign object at 0x7da1b03563e0>
for taget[tuple[[<ast.Name object at 0x7da1b03577c0>, <ast.Name object at 0x7da1b0354970>]]] in starred[name[request].query] begin[:]
if compare[name[value] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b0354700>
name[request].path assign[=] call[name[request].path][<ast.Slice object at 0x7da1b0354be0>]
return[tuple[[<ast.Attribute object at 0x7da1b0357160>, <ast.Attribute object at 0x7da1b03572b0>]]] | keyword[def] identifier[_update_request_uri_query] ( identifier[self] , identifier[request] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[request] . identifier[path] :
identifier[request] . identifier[path] , identifier[_] , identifier[query_string] = identifier[request] . identifier[path] . identifier[partition] ( literal[string] )
keyword[if] identifier[query_string] :
identifier[query_params] = identifier[query_string] . identifier[split] ( literal[string] )
keyword[for] identifier[query] keyword[in] identifier[query_params] :
keyword[if] literal[string] keyword[in] identifier[query] :
identifier[name] , identifier[_] , identifier[value] = identifier[query] . identifier[partition] ( literal[string] )
identifier[request] . identifier[query] . identifier[append] (( identifier[name] , identifier[value] ))
identifier[request] . identifier[path] = identifier[url_quote] ( identifier[request] . identifier[path] , literal[string] )
keyword[if] identifier[request] . identifier[query] :
identifier[request] . identifier[path] += literal[string]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[request] . identifier[query] :
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[request] . identifier[path] += identifier[name] + literal[string] + identifier[url_quote] ( identifier[value] , literal[string] )+ literal[string]
identifier[request] . identifier[path] = identifier[request] . identifier[path] [:- literal[int] ]
keyword[return] identifier[request] . identifier[path] , identifier[request] . identifier[query] | def _update_request_uri_query(self, request):
"""pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters"""
if '?' in request.path:
(request.path, _, query_string) = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
(name, _, value) = query.partition('=')
request.query.append((name, value)) # depends on [control=['if'], data=['query']] # depends on [control=['for'], data=['query']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
request.path = url_quote(request.path, "/()$=',")
# add encoded queries to request.path.
if request.query:
request.path += '?'
for (name, value) in request.query:
if value is not None:
request.path += name + '=' + url_quote(value, "/()$=',") + '&' # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=[]]
request.path = request.path[:-1] # depends on [control=['if'], data=[]]
return (request.path, request.query) |
def _roll_random(rng, n):
    "returns a random # from 0 to N-1"
    bits = util.bit_length(n-1)
    bytes = (bits + 7) // 8
    # Mask for the leftover high-order bits in the top byte.
    hbyte_mask = pow(2, bits % 8) - 1
    # so here's the plan:
    # we fetch as many random bits as we'd need to fit N-1, and if the
    # generated number is >= N, we try again. in the worst case (N-1 is a
    # power of 2), we have slightly better than 50% odds of getting one that
    # fits, so i can't guarantee that this loop will ever finish, but the odds
    # of it looping forever should be infinitesimal.
    # NOTE(review): 'bytes' shadows the builtin, and the chr(ord(...))
    # juggling assumes Python 2 str semantics for rng.read() -- verify
    # before porting to Python 3.
    while True:
        x = rng.read(bytes)
        if hbyte_mask > 0:
            x = chr(ord(x[0]) & hbyte_mask) + x[1:]
        num = util.inflate_long(x, 1)
        if num < n:
            break
    return num | def function[_roll_random, parameter[rng, n]]:
constant[returns a random # from 0 to N-1]
variable[bits] assign[=] call[name[util].bit_length, parameter[binary_operation[name[n] - constant[1]]]]
variable[bytes] assign[=] binary_operation[binary_operation[name[bits] + constant[7]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]]
variable[hbyte_mask] assign[=] binary_operation[call[name[pow], parameter[constant[2], binary_operation[name[bits] <ast.Mod object at 0x7da2590d6920> constant[8]]]] - constant[1]]
while constant[True] begin[:]
variable[x] assign[=] call[name[rng].read, parameter[name[bytes]]]
if compare[name[hbyte_mask] greater[>] constant[0]] begin[:]
variable[x] assign[=] binary_operation[call[name[chr], parameter[binary_operation[call[name[ord], parameter[call[name[x]][constant[0]]]] <ast.BitAnd object at 0x7da2590d6b60> name[hbyte_mask]]]] + call[name[x]][<ast.Slice object at 0x7da1b10c4f70>]]
variable[num] assign[=] call[name[util].inflate_long, parameter[name[x], constant[1]]]
if compare[name[num] less[<] name[n]] begin[:]
break
return[name[num]] | keyword[def] identifier[_roll_random] ( identifier[rng] , identifier[n] ):
literal[string]
identifier[bits] = identifier[util] . identifier[bit_length] ( identifier[n] - literal[int] )
identifier[bytes] =( identifier[bits] + literal[int] )// literal[int]
identifier[hbyte_mask] = identifier[pow] ( literal[int] , identifier[bits] % literal[int] )- literal[int]
keyword[while] keyword[True] :
identifier[x] = identifier[rng] . identifier[read] ( identifier[bytes] )
keyword[if] identifier[hbyte_mask] > literal[int] :
identifier[x] = identifier[chr] ( identifier[ord] ( identifier[x] [ literal[int] ])& identifier[hbyte_mask] )+ identifier[x] [ literal[int] :]
identifier[num] = identifier[util] . identifier[inflate_long] ( identifier[x] , literal[int] )
keyword[if] identifier[num] < identifier[n] :
keyword[break]
keyword[return] identifier[num] | def _roll_random(rng, n):
"""returns a random # from 0 to N-1"""
bits = util.bit_length(n - 1)
bytes = (bits + 7) // 8
hbyte_mask = pow(2, bits % 8) - 1
# so here's the plan:
# we fetch as many random bits as we'd need to fit N-1, and if the
# generated number is >= N, we try again. in the worst case (N-1 is a
# power of 2), we have slightly better than 50% odds of getting one that
# fits, so i can't guarantee that this loop will ever finish, but the odds
# of it looping forever should be infinitesimal.
while True:
x = rng.read(bytes)
if hbyte_mask > 0:
x = chr(ord(x[0]) & hbyte_mask) + x[1:] # depends on [control=['if'], data=['hbyte_mask']]
num = util.inflate_long(x, 1)
if num < n:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return num |
def env_export(prefix, exported, env):
    """
    Define the list of 'exported' variables with 'prefix' with values from 'env'
    """
    # Each value is stored under the key '<prefix>_<name>' in the
    # module-level ENV mapping.
    for exp in exported:
        ENV["_".join([prefix, exp])] = env[exp] | def function[env_export, parameter[prefix, exported, env]]:
constant[
Define the list of 'exported' variables with 'prefix' with values from 'env'
]
for taget[name[exp]] in starred[name[exported]] begin[:]
call[name[ENV]][call[constant[_].join, parameter[list[[<ast.Name object at 0x7da1b0124bb0>, <ast.Name object at 0x7da1b0127070>]]]]] assign[=] call[name[env]][name[exp]] | keyword[def] identifier[env_export] ( identifier[prefix] , identifier[exported] , identifier[env] ):
literal[string]
keyword[for] identifier[exp] keyword[in] identifier[exported] :
identifier[ENV] [ literal[string] . identifier[join] ([ identifier[prefix] , identifier[exp] ])]= identifier[env] [ identifier[exp] ] | def env_export(prefix, exported, env):
"""
Define the list of 'exported' variables with 'prefix' with values from 'env'
"""
for exp in exported:
ENV['_'.join([prefix, exp])] = env[exp] # depends on [control=['for'], data=['exp']] |
def is_token_from_emulator(auth_header: str) -> bool:
    """ Determines if a given Auth header is from the Bot Framework Emulator
    :param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.
    :type auth_header: str
    :return: True, if the token was issued by the Emulator. Otherwise, false.
    """
    # The Auth Header generally looks like this:
    # "Bearer eyJ0e[...Big Long String...]XAiO"
    if not auth_header:
        # No token. Can't be an emulator token.
        return False
    parts = auth_header.split(' ')
    if len(parts) != 2:
        # Emulator tokens MUST have exactly 2 parts.
        # If we don't have 2 parts, it's not an emulator token
        return False
    auth_scheme = parts[0]
    bearer_token = parts[1]
    # We now have an array that should be:
    # [0] = "Bearer"
    # [1] = "[Big Long String]"
    if auth_scheme != 'Bearer':
        # The scheme from the emulator MUST be "Bearer"
        return False
    # Parse the Big Long String into an actual token. The signature is not
    # verified here; we only need the claims in order to inspect the issuer.
    token = jwt.decode(bearer_token, verify=False)
    if not token:
        return False
    # Is there an Issuer? Use .get() so a token that simply lacks the 'iss'
    # claim is treated as "not from the Emulator" instead of raising KeyError
    # (token['iss'] would throw before the falsiness check could run).
    issuer = token.get('iss')
    if not issuer:
        # No Issuer, means it's not from the Emulator.
        return False
    # Is the token issued by a source we consider to be the emulator?
    issuer_list = EmulatorValidation.TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS.issuer
    if issuer_list and issuer not in issuer_list:
        # Not a Valid Issuer. This is NOT a Bot Framework Emulator Token.
        return False
    # The Token is from the Bot Framework Emulator. Success!
    return True
constant[ Determines if a given Auth header is from the Bot Framework Emulator
:param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.
:type auth_header: str
:return: True, if the token was issued by the Emulator. Otherwise, false.
]
if <ast.UnaryOp object at 0x7da1b05fb700> begin[:]
return[constant[False]]
variable[parts] assign[=] call[name[auth_header].split, parameter[constant[ ]]]
if compare[call[name[len], parameter[name[parts]]] not_equal[!=] constant[2]] begin[:]
return[constant[False]]
variable[auth_scheme] assign[=] call[name[parts]][constant[0]]
variable[bearer_token] assign[=] call[name[parts]][constant[1]]
if compare[name[auth_scheme] not_equal[!=] constant[Bearer]] begin[:]
return[constant[False]]
variable[token] assign[=] call[name[jwt].decode, parameter[name[bearer_token]]]
if <ast.UnaryOp object at 0x7da1b05f9ed0> begin[:]
return[constant[False]]
variable[issuer] assign[=] call[name[token]][constant[iss]]
if <ast.UnaryOp object at 0x7da1b05f9270> begin[:]
return[constant[False]]
variable[issuer_list] assign[=] name[EmulatorValidation].TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS.issuer
if <ast.BoolOp object at 0x7da1b05f9960> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_token_from_emulator] ( identifier[auth_header] : identifier[str] )-> identifier[bool] :
literal[string]
keyword[if] keyword[not] identifier[auth_header] :
keyword[return] keyword[False]
identifier[parts] = identifier[auth_header] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[parts] )!= literal[int] :
keyword[return] keyword[False]
identifier[auth_scheme] = identifier[parts] [ literal[int] ]
identifier[bearer_token] = identifier[parts] [ literal[int] ]
keyword[if] identifier[auth_scheme] != literal[string] :
keyword[return] keyword[False]
identifier[token] = identifier[jwt] . identifier[decode] ( identifier[bearer_token] , identifier[verify] = keyword[False] )
keyword[if] keyword[not] identifier[token] :
keyword[return] keyword[False]
identifier[issuer] = identifier[token] [ literal[string] ]
keyword[if] keyword[not] identifier[issuer] :
keyword[return] keyword[False]
identifier[issuer_list] = identifier[EmulatorValidation] . identifier[TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS] . identifier[issuer]
keyword[if] identifier[issuer_list] keyword[and] keyword[not] identifier[issuer] keyword[in] identifier[issuer_list] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_token_from_emulator(auth_header: str) -> bool:
""" Determines if a given Auth header is from the Bot Framework Emulator
:param auth_header: Bearer Token, in the 'Bearer [Long String]' Format.
:type auth_header: str
:return: True, if the token was issued by the Emulator. Otherwise, false.
"""
# The Auth Header generally looks like this:
# "Bearer eyJ0e[...Big Long String...]XAiO"
if not auth_header:
# No token. Can't be an emulator token.
return False # depends on [control=['if'], data=[]]
parts = auth_header.split(' ')
if len(parts) != 2:
# Emulator tokens MUST have exactly 2 parts.
# If we don't have 2 parts, it's not an emulator token
return False # depends on [control=['if'], data=[]]
auth_scheme = parts[0]
bearer_token = parts[1]
# We now have an array that should be:
# [0] = "Bearer"
# [1] = "[Big Long String]"
if auth_scheme != 'Bearer':
# The scheme from the emulator MUST be "Bearer"
return False # depends on [control=['if'], data=[]]
# Parse the Big Long String into an actual token.
token = jwt.decode(bearer_token, verify=False)
if not token:
return False # depends on [control=['if'], data=[]]
# Is there an Issuer?
issuer = token['iss']
if not issuer:
# No Issuer, means it's not from the Emulator.
return False # depends on [control=['if'], data=[]]
# Is the token issues by a source we consider to be the emulator?
issuer_list = EmulatorValidation.TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS.issuer
if issuer_list and (not issuer in issuer_list):
# Not a Valid Issuer. This is NOT a Bot Framework Emulator Token.
return False # depends on [control=['if'], data=[]]
# The Token is from the Bot Framework Emulator. Success!
return True |
    async def set_chat_title(self, chat_id: typing.Union[base.Integer, base.String],
                             title: base.String) -> base.Boolean:
        """
        Use this method to change the title of a chat. Titles can't be changed for private chats.
        The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
        Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
        setting is off in the target group.
        Source: https://core.telegram.org/bots/api#setchattitle
        :param chat_id: Unique identifier for the target chat or username of the target channel
        :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
        :param title: New chat title, 1-255 characters
        :type title: :obj:`base.String`
        :return: Returns True on success
        :rtype: :obj:`base.Boolean`
        """
        # NOTE: generate_payload(**locals()) serialises the current local
        # variables (chat_id, title) into the request body, so no extra
        # locals may be introduced before this call.
        payload = generate_payload(**locals())
        result = await self.request(api.Methods.SET_CHAT_TITLE, payload)
        return result
identifier[title] : identifier[base] . identifier[String] )-> identifier[base] . identifier[Boolean] :
literal[string]
identifier[payload] = identifier[generate_payload] (** identifier[locals] ())
identifier[result] = keyword[await] identifier[self] . identifier[request] ( identifier[api] . identifier[Methods] . identifier[SET_CHAT_TITLE] , identifier[payload] )
keyword[return] identifier[result] | async def set_chat_title(self, chat_id: typing.Union[base.Integer, base.String], title: base.String) -> base.Boolean:
"""
Use this method to change the title of a chat. Titles can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#setchattitle
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param title: New chat title, 1-255 characters
:type title: :obj:`base.String`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_CHAT_TITLE, payload)
return result |
def event_return(events):
    '''
    Write event data (return data and non-return data) to file on the master.
    '''
    if not events:
        # Nothing to record; don't open the logfile in vain.
        return
    # Pass an empty ret dict, since this is a list of events.
    opts = _get_options({})
    filename = opts['filename']
    try:
        with salt.utils.files.flopen(filename, 'a') as logfile:
            for single_event in events:
                salt.utils.json.dump(single_event, logfile)
                logfile.write(str('\n'))  # future lint: disable=blacklisted-function
    except Exception:
        log.error('Could not write to rawdata_json file %s', filename)
        raise
constant[
Write event data (return data and non-return data) to file on the master.
]
if <ast.UnaryOp object at 0x7da20e74b790> begin[:]
return[None]
variable[opts] assign[=] call[name[_get_options], parameter[dictionary[[], []]]]
<ast.Try object at 0x7da207f01600> | keyword[def] identifier[event_return] ( identifier[events] ):
literal[string]
keyword[if] keyword[not] identifier[events] :
keyword[return]
identifier[opts] = identifier[_get_options] ({})
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[flopen] ( identifier[opts] [ literal[string] ], literal[string] ) keyword[as] identifier[logfile] :
keyword[for] identifier[event] keyword[in] identifier[events] :
identifier[salt] . identifier[utils] . identifier[json] . identifier[dump] ( identifier[event] , identifier[logfile] )
identifier[logfile] . identifier[write] ( identifier[str] ( literal[string] ))
keyword[except] identifier[Exception] :
identifier[log] . identifier[error] ( literal[string] , identifier[opts] [ literal[string] ])
keyword[raise] | def event_return(events):
"""
Write event data (return data and non-return data) to file on the master.
"""
if not events:
# events is an empty list.
# Don't open the logfile in vain.
return # depends on [control=['if'], data=[]]
opts = _get_options({}) # Pass in empty ret, since this is a list of events
try:
with salt.utils.files.flopen(opts['filename'], 'a') as logfile:
for event in events:
salt.utils.json.dump(event, logfile)
logfile.write(str('\n')) # future lint: disable=blacklisted-function # depends on [control=['for'], data=['event']] # depends on [control=['with'], data=['logfile']] # depends on [control=['try'], data=[]]
except Exception:
log.error('Could not write to rawdata_json file %s', opts['filename'])
raise # depends on [control=['except'], data=[]] |
def update_persistent_boot(self, devices=[]):
"""Changes the persistent boot device order for the host
:param devices: ordered list of boot devices
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the given input is not valid.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
# Check if the input is valid
for item in devices:
if item.upper() not in DEVICE_COMMON_TO_REDFISH:
msg = (self._('Invalid input "%(device)s". Valid devices: '
'NETWORK, HDD, ISCSI or CDROM.') %
{'device': item})
raise exception.IloInvalidInputError(msg)
try:
sushy_system.update_persistent_boot(
devices, persistent=True)
except sushy.exceptions.SushyError as e:
msg = (self._('The Redfish controller failed to update '
'persistent boot device %(devices)s.'
'Error: %(error)s') %
{'devices': devices, 'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg) | def function[update_persistent_boot, parameter[self, devices]]:
constant[Changes the persistent boot device order for the host
:param devices: ordered list of boot devices
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the given input is not valid.
]
variable[sushy_system] assign[=] call[name[self]._get_sushy_system, parameter[name[PROLIANT_SYSTEM_ID]]]
for taget[name[item]] in starred[name[devices]] begin[:]
if compare[call[name[item].upper, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[DEVICE_COMMON_TO_REDFISH]] begin[:]
variable[msg] assign[=] binary_operation[call[name[self]._, parameter[constant[Invalid input "%(device)s". Valid devices: NETWORK, HDD, ISCSI or CDROM.]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b197c370>], [<ast.Name object at 0x7da1b197f0d0>]]]
<ast.Raise object at 0x7da1b197e1a0>
<ast.Try object at 0x7da1b197d6f0> | keyword[def] identifier[update_persistent_boot] ( identifier[self] , identifier[devices] =[]):
literal[string]
identifier[sushy_system] = identifier[self] . identifier[_get_sushy_system] ( identifier[PROLIANT_SYSTEM_ID] )
keyword[for] identifier[item] keyword[in] identifier[devices] :
keyword[if] identifier[item] . identifier[upper] () keyword[not] keyword[in] identifier[DEVICE_COMMON_TO_REDFISH] :
identifier[msg] =( identifier[self] . identifier[_] ( literal[string]
literal[string] )%
{ literal[string] : identifier[item] })
keyword[raise] identifier[exception] . identifier[IloInvalidInputError] ( identifier[msg] )
keyword[try] :
identifier[sushy_system] . identifier[update_persistent_boot] (
identifier[devices] , identifier[persistent] = keyword[True] )
keyword[except] identifier[sushy] . identifier[exceptions] . identifier[SushyError] keyword[as] identifier[e] :
identifier[msg] =( identifier[self] . identifier[_] ( literal[string]
literal[string]
literal[string] )%
{ literal[string] : identifier[devices] , literal[string] : identifier[str] ( identifier[e] )})
identifier[LOG] . identifier[debug] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[IloError] ( identifier[msg] ) | def update_persistent_boot(self, devices=[]):
"""Changes the persistent boot device order for the host
:param devices: ordered list of boot devices
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the given input is not valid.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
# Check if the input is valid
for item in devices:
if item.upper() not in DEVICE_COMMON_TO_REDFISH:
msg = self._('Invalid input "%(device)s". Valid devices: NETWORK, HDD, ISCSI or CDROM.') % {'device': item}
raise exception.IloInvalidInputError(msg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
try:
sushy_system.update_persistent_boot(devices, persistent=True) # depends on [control=['try'], data=[]]
except sushy.exceptions.SushyError as e:
msg = self._('The Redfish controller failed to update persistent boot device %(devices)s.Error: %(error)s') % {'devices': devices, 'error': str(e)}
LOG.debug(msg)
raise exception.IloError(msg) # depends on [control=['except'], data=['e']] |
def get_deliveryservers(self, domainid, page=None):
"""Get a domains delivery servers"""
opts = {}
if page:
opts['page'] = page
return self.api_call(
ENDPOINTS['deliveryservers']['list'],
dict(domainid=domainid), **opts) | def function[get_deliveryservers, parameter[self, domainid, page]]:
constant[Get a domains delivery servers]
variable[opts] assign[=] dictionary[[], []]
if name[page] begin[:]
call[name[opts]][constant[page]] assign[=] name[page]
return[call[name[self].api_call, parameter[call[call[name[ENDPOINTS]][constant[deliveryservers]]][constant[list]], call[name[dict], parameter[]]]]] | keyword[def] identifier[get_deliveryservers] ( identifier[self] , identifier[domainid] , identifier[page] = keyword[None] ):
literal[string]
identifier[opts] ={}
keyword[if] identifier[page] :
identifier[opts] [ literal[string] ]= identifier[page]
keyword[return] identifier[self] . identifier[api_call] (
identifier[ENDPOINTS] [ literal[string] ][ literal[string] ],
identifier[dict] ( identifier[domainid] = identifier[domainid] ),** identifier[opts] ) | def get_deliveryservers(self, domainid, page=None):
"""Get a domains delivery servers"""
opts = {}
if page:
opts['page'] = page # depends on [control=['if'], data=[]]
return self.api_call(ENDPOINTS['deliveryservers']['list'], dict(domainid=domainid), **opts) |
def get_default(self):
"""Return the default VLAN from the set."""
length = len(self)
if length == 0:
return None
elif length == 1:
return self[0]
else:
return sorted(self, key=attrgetter('id'))[0] | def function[get_default, parameter[self]]:
constant[Return the default VLAN from the set.]
variable[length] assign[=] call[name[len], parameter[name[self]]]
if compare[name[length] equal[==] constant[0]] begin[:]
return[constant[None]] | keyword[def] identifier[get_default] ( identifier[self] ):
literal[string]
identifier[length] = identifier[len] ( identifier[self] )
keyword[if] identifier[length] == literal[int] :
keyword[return] keyword[None]
keyword[elif] identifier[length] == literal[int] :
keyword[return] identifier[self] [ literal[int] ]
keyword[else] :
keyword[return] identifier[sorted] ( identifier[self] , identifier[key] = identifier[attrgetter] ( literal[string] ))[ literal[int] ] | def get_default(self):
"""Return the default VLAN from the set."""
length = len(self)
if length == 0:
return None # depends on [control=['if'], data=[]]
elif length == 1:
return self[0] # depends on [control=['if'], data=[]]
else:
return sorted(self, key=attrgetter('id'))[0] |
def wheelEvent(self, event):
"""
Reimplements the :meth:`QPlainTextEdit.wheelEvent` method.
:param event: Event.
:type event: QEvent
"""
if event.modifiers() == Qt.ControlModifier:
if event.delta() == 120:
self.zoom_in()
elif event.delta() == -120:
self.zoom_out()
event.ignore()
else:
QPlainTextEdit.wheelEvent(self, event) | def function[wheelEvent, parameter[self, event]]:
constant[
Reimplements the :meth:`QPlainTextEdit.wheelEvent` method.
:param event: Event.
:type event: QEvent
]
if compare[call[name[event].modifiers, parameter[]] equal[==] name[Qt].ControlModifier] begin[:]
if compare[call[name[event].delta, parameter[]] equal[==] constant[120]] begin[:]
call[name[self].zoom_in, parameter[]]
call[name[event].ignore, parameter[]] | keyword[def] identifier[wheelEvent] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[modifiers] ()== identifier[Qt] . identifier[ControlModifier] :
keyword[if] identifier[event] . identifier[delta] ()== literal[int] :
identifier[self] . identifier[zoom_in] ()
keyword[elif] identifier[event] . identifier[delta] ()==- literal[int] :
identifier[self] . identifier[zoom_out] ()
identifier[event] . identifier[ignore] ()
keyword[else] :
identifier[QPlainTextEdit] . identifier[wheelEvent] ( identifier[self] , identifier[event] ) | def wheelEvent(self, event):
"""
Reimplements the :meth:`QPlainTextEdit.wheelEvent` method.
:param event: Event.
:type event: QEvent
"""
if event.modifiers() == Qt.ControlModifier:
if event.delta() == 120:
self.zoom_in() # depends on [control=['if'], data=[]]
elif event.delta() == -120:
self.zoom_out() # depends on [control=['if'], data=[]]
event.ignore() # depends on [control=['if'], data=[]]
else:
QPlainTextEdit.wheelEvent(self, event) |
def payload_register(ptype, klass, pid):
    """Notify Juju, while a hook is running, that a payload has started."""
    subprocess.check_call(['payload-register', ptype, klass, pid])
constant[ is used while a hook is running to let Juju know that a
payload has been started.]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18bc71810>]]
for taget[name[x]] in starred[list[[<ast.Name object at 0x7da18bc72650>, <ast.Name object at 0x7da18bc70bb0>, <ast.Name object at 0x7da18bc73040>]]] begin[:]
call[name[cmd].append, parameter[name[x]]]
call[name[subprocess].check_call, parameter[name[cmd]]] | keyword[def] identifier[payload_register] ( identifier[ptype] , identifier[klass] , identifier[pid] ):
literal[string]
identifier[cmd] =[ literal[string] ]
keyword[for] identifier[x] keyword[in] [ identifier[ptype] , identifier[klass] , identifier[pid] ]:
identifier[cmd] . identifier[append] ( identifier[x] )
identifier[subprocess] . identifier[check_call] ( identifier[cmd] ) | def payload_register(ptype, klass, pid):
""" is used while a hook is running to let Juju know that a
payload has been started."""
cmd = ['payload-register']
for x in [ptype, klass, pid]:
cmd.append(x) # depends on [control=['for'], data=['x']]
subprocess.check_call(cmd) |
def parse_http_date(date):
    """
    Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Return an integer expressed in seconds since the epoch, in UTC.

    :param date: an HTTP date string, e.g. ``'Sun, 06 Nov 1994 08:49:37 GMT'``
    :raises ValueError: if *date* matches none of the three RFC formats, or
        encodes an invalid calendar date.

    Implementation copied from Django.
    https://github.com/django/django/blob/master/django/utils/http.py#L157
    License: BSD 3-clause
    """
    MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
    __D = r'(?P<day>\d{2})'
    __D2 = r'(?P<day>[ \d]\d)'
    __M = r'(?P<mon>\w{3})'
    __Y = r'(?P<year>\d{4})'
    __Y2 = r'(?P<year>\d{2})'
    __T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
    RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
    RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
    ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
    # email.utils.parsedate() does the job for RFC1123 dates; unfortunately
    # RFC7231 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # Two-digit RFC850 years: per RFC7231, values that would be more
            # than 50 years in the future are interpreted as the past century.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        # Renamed from 'min'/'sec' so the builtin min() is not shadowed.
        minute = int(m.group('min'))
        second = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, minute, second)
        # timegm treats the struct_time as UTC, matching the GMT suffix.
        return calendar.timegm(result.utctimetuple())
    except Exception as exc:
        raise ValueError("%r is not a valid date" % date) from exc
constant[
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
Implementation copied from Django.
https://github.com/django/django/blob/master/django/utils/http.py#L157
License: BSD 3-clause
]
variable[MONTHS] assign[=] call[constant[jan feb mar apr may jun jul aug sep oct nov dec].split, parameter[]]
variable[__D] assign[=] constant[(?P<day>\d{2})]
variable[__D2] assign[=] constant[(?P<day>[ \d]\d)]
variable[__M] assign[=] constant[(?P<mon>\w{3})]
variable[__Y] assign[=] constant[(?P<year>\d{4})]
variable[__Y2] assign[=] constant[(?P<year>\d{2})]
variable[__T] assign[=] constant[(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})]
variable[RFC1123_DATE] assign[=] call[name[re].compile, parameter[binary_operation[constant[^\w{3}, %s %s %s %s GMT$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b05365f0>, <ast.Name object at 0x7da1b05378e0>, <ast.Name object at 0x7da1b0535750>, <ast.Name object at 0x7da1b0534940>]]]]]
variable[RFC850_DATE] assign[=] call[name[re].compile, parameter[binary_operation[constant[^\w{6,9}, %s-%s-%s %s GMT$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0534520>, <ast.Name object at 0x7da1b0535c60>, <ast.Name object at 0x7da1b0534400>, <ast.Name object at 0x7da1b0536620>]]]]]
variable[ASCTIME_DATE] assign[=] call[name[re].compile, parameter[binary_operation[constant[^\w{3} %s %s %s %s$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0537f70>, <ast.Name object at 0x7da1b0537310>, <ast.Name object at 0x7da1b0536f20>, <ast.Name object at 0x7da1b0536fb0>]]]]]
for taget[name[regex]] in starred[tuple[[<ast.Name object at 0x7da1b05378b0>, <ast.Name object at 0x7da1b05369e0>, <ast.Name object at 0x7da1b0535d20>]]] begin[:]
variable[m] assign[=] call[name[regex].match, parameter[name[date]]]
if compare[name[m] is_not constant[None]] begin[:]
break
<ast.Try object at 0x7da1b0534040> | keyword[def] identifier[parse_http_date] ( identifier[date] ):
literal[string]
identifier[MONTHS] = literal[string] . identifier[split] ()
identifier[__D] = literal[string]
identifier[__D2] = literal[string]
identifier[__M] = literal[string]
identifier[__Y] = literal[string]
identifier[__Y2] = literal[string]
identifier[__T] = literal[string]
identifier[RFC1123_DATE] = identifier[re] . identifier[compile] ( literal[string] %( identifier[__D] , identifier[__M] , identifier[__Y] , identifier[__T] ))
identifier[RFC850_DATE] = identifier[re] . identifier[compile] ( literal[string] %( identifier[__D] , identifier[__M] , identifier[__Y2] , identifier[__T] ))
identifier[ASCTIME_DATE] = identifier[re] . identifier[compile] ( literal[string] %( identifier[__M] , identifier[__D2] , identifier[__T] , identifier[__Y] ))
keyword[for] identifier[regex] keyword[in] identifier[RFC1123_DATE] , identifier[RFC850_DATE] , identifier[ASCTIME_DATE] :
identifier[m] = identifier[regex] . identifier[match] ( identifier[date] )
keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] :
keyword[break]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[date] )
keyword[try] :
identifier[year] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ))
keyword[if] identifier[year] < literal[int] :
keyword[if] identifier[year] < literal[int] :
identifier[year] += literal[int]
keyword[else] :
identifier[year] += literal[int]
identifier[month] = identifier[MONTHS] . identifier[index] ( identifier[m] . identifier[group] ( literal[string] ). identifier[lower] ())+ literal[int]
identifier[day] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ))
identifier[hour] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ))
identifier[min] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ))
identifier[sec] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] ))
identifier[result] = identifier[datetime] . identifier[datetime] ( identifier[year] , identifier[month] , identifier[day] , identifier[hour] , identifier[min] , identifier[sec] )
keyword[return] identifier[calendar] . identifier[timegm] ( identifier[result] . identifier[utctimetuple] ())
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[date] ) keyword[from] identifier[exc] | def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
Implementation copied from Django.
https://github.com/django/django/blob/master/django/utils/http.py#L157
License: BSD 3-clause
"""
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = '(?P<day>\\d{2})'
__D2 = '(?P<day>[ \\d]\\d)'
__M = '(?P<mon>\\w{3})'
__Y = '(?P<year>\\d{4})'
__Y2 = '(?P<year>\\d{2})'
__T = '(?P<hour>\\d{2}):(?P<min>\\d{2}):(?P<sec>\\d{2})'
RFC1123_DATE = re.compile('^\\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile('^\\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile('^\\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in (RFC1123_DATE, RFC850_DATE, ASCTIME_DATE):
m = regex.match(date)
if m is not None:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['regex']]
else:
raise ValueError('%r is not in a valid HTTP date format' % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000 # depends on [control=['if'], data=['year']]
else:
year += 1900 # depends on [control=['if'], data=['year']]
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple()) # depends on [control=['try'], data=[]]
except Exception as exc:
raise ValueError('%r is not a valid date' % date) from exc # depends on [control=['except'], data=['exc']] |
def describe(self, uri, format="", convert=True):
"""
A simple DESCRIBE query with no 'where' arguments. 'uri' is the resource you want to describe.
TODO: there are some errors with describe queries, due to the results being sent back
For the moment we're not using them much.. needs to be tested more.
"""
lines = ["PREFIX %s: <%s>" % (k, r) for k, r in self.prefixes.iteritems()]
if uri.startswith("http://"):
lines.extend(["DESCRIBE <%s>" % uri])
else: # it's a shortened uri
lines.extend(["DESCRIBE %s" % uri])
query = "\n".join(lines)
if self.verbose:
print(query, "\n\n")
return self.__doQuery(query, format, convert) | def function[describe, parameter[self, uri, format, convert]]:
constant[
A simple DESCRIBE query with no 'where' arguments. 'uri' is the resource you want to describe.
TODO: there are some errors with describe queries, due to the results being sent back
For the moment we're not using them much.. needs to be tested more.
]
variable[lines] assign[=] <ast.ListComp object at 0x7da1b1153070>
if call[name[uri].startswith, parameter[constant[http://]]] begin[:]
call[name[lines].extend, parameter[list[[<ast.BinOp object at 0x7da1b11518a0>]]]]
variable[query] assign[=] call[constant[
].join, parameter[name[lines]]]
if name[self].verbose begin[:]
call[name[print], parameter[name[query], constant[
]]]
return[call[name[self].__doQuery, parameter[name[query], name[format], name[convert]]]] | keyword[def] identifier[describe] ( identifier[self] , identifier[uri] , identifier[format] = literal[string] , identifier[convert] = keyword[True] ):
literal[string]
identifier[lines] =[ literal[string] %( identifier[k] , identifier[r] ) keyword[for] identifier[k] , identifier[r] keyword[in] identifier[self] . identifier[prefixes] . identifier[iteritems] ()]
keyword[if] identifier[uri] . identifier[startswith] ( literal[string] ):
identifier[lines] . identifier[extend] ([ literal[string] % identifier[uri] ])
keyword[else] :
identifier[lines] . identifier[extend] ([ literal[string] % identifier[uri] ])
identifier[query] = literal[string] . identifier[join] ( identifier[lines] )
keyword[if] identifier[self] . identifier[verbose] :
identifier[print] ( identifier[query] , literal[string] )
keyword[return] identifier[self] . identifier[__doQuery] ( identifier[query] , identifier[format] , identifier[convert] ) | def describe(self, uri, format='', convert=True):
"""
A simple DESCRIBE query with no 'where' arguments. 'uri' is the resource you want to describe.
TODO: there are some errors with describe queries, due to the results being sent back
For the moment we're not using them much.. needs to be tested more.
"""
lines = ['PREFIX %s: <%s>' % (k, r) for (k, r) in self.prefixes.iteritems()]
if uri.startswith('http://'):
lines.extend(['DESCRIBE <%s>' % uri]) # depends on [control=['if'], data=[]]
else: # it's a shortened uri
lines.extend(['DESCRIBE %s' % uri])
query = '\n'.join(lines)
if self.verbose:
print(query, '\n\n') # depends on [control=['if'], data=[]]
return self.__doQuery(query, format, convert) |
def add_jira_status(test_key, test_status, test_comment):
    """Record a test result so it can be pushed to Jira later.
    Merges with any result already stored for the same test key:
    the combined status is 'Pass' only if both runs passed, comments
    are concatenated, and attachments are accumulated.
    :param test_key: test case key in Jira
    :param test_status: test case status
    :param test_comment: test case comments
    """
    global attachments
    if not (test_key and enabled):
        return
    if test_key in jira_tests_status:
        # Fold the previous record for this test into the new one.
        _, prev_status, prev_comment, prev_attachments = jira_tests_status[test_key]
        if prev_status == 'Pass' and test_status == 'Pass':
            test_status = 'Pass'
        else:
            test_status = 'Fail'
        if prev_comment:
            if test_comment:
                test_comment = '{}\n{}'.format(prev_comment, test_comment)
            else:
                test_comment = prev_comment
        attachments += prev_attachments
    # Store (or overwrite) the merged result for this test case.
    jira_tests_status[test_key] = (test_key, test_status, test_comment, attachments)
constant[Save test status and comments to update Jira later
:param test_key: test case key in Jira
:param test_status: test case status
:param test_comment: test case comments
]
<ast.Global object at 0x7da1b244fa30>
if <ast.BoolOp object at 0x7da1b244fa60> begin[:]
if compare[name[test_key] in name[jira_tests_status]] begin[:]
variable[previous_status] assign[=] call[name[jira_tests_status]][name[test_key]]
variable[test_status] assign[=] <ast.IfExp object at 0x7da1b23eda80>
if <ast.BoolOp object at 0x7da1b23edff0> begin[:]
variable[test_comment] assign[=] call[constant[{}
{}].format, parameter[call[name[previous_status]][constant[2]], name[test_comment]]]
<ast.AugAssign object at 0x7da1b23ec220>
call[name[jira_tests_status]][name[test_key]] assign[=] tuple[[<ast.Name object at 0x7da1b244fbb0>, <ast.Name object at 0x7da1b244c4c0>, <ast.Name object at 0x7da1b244ea70>, <ast.Name object at 0x7da1b244ca60>]] | keyword[def] identifier[add_jira_status] ( identifier[test_key] , identifier[test_status] , identifier[test_comment] ):
literal[string]
keyword[global] identifier[attachments]
keyword[if] identifier[test_key] keyword[and] identifier[enabled] :
keyword[if] identifier[test_key] keyword[in] identifier[jira_tests_status] :
identifier[previous_status] = identifier[jira_tests_status] [ identifier[test_key] ]
identifier[test_status] = literal[string] keyword[if] identifier[previous_status] [ literal[int] ]== literal[string] keyword[and] identifier[test_status] == literal[string] keyword[else] literal[string]
keyword[if] identifier[previous_status] [ literal[int] ] keyword[and] identifier[test_comment] :
identifier[test_comment] = literal[string] . identifier[format] ( identifier[previous_status] [ literal[int] ], identifier[test_comment] )
keyword[elif] identifier[previous_status] [ literal[int] ] keyword[and] keyword[not] identifier[test_comment] :
identifier[test_comment] = identifier[previous_status] [ literal[int] ]
identifier[attachments] += identifier[previous_status] [ literal[int] ]
identifier[jira_tests_status] [ identifier[test_key] ]=( identifier[test_key] , identifier[test_status] , identifier[test_comment] , identifier[attachments] ) | def add_jira_status(test_key, test_status, test_comment):
"""Save test status and comments to update Jira later
:param test_key: test case key in Jira
:param test_status: test case status
:param test_comment: test case comments
"""
global attachments
if test_key and enabled:
if test_key in jira_tests_status:
# Merge data with previous test status
previous_status = jira_tests_status[test_key]
test_status = 'Pass' if previous_status[1] == 'Pass' and test_status == 'Pass' else 'Fail'
if previous_status[2] and test_comment:
test_comment = '{}\n{}'.format(previous_status[2], test_comment) # depends on [control=['if'], data=[]]
elif previous_status[2] and (not test_comment):
test_comment = previous_status[2] # depends on [control=['if'], data=[]]
attachments += previous_status[3] # depends on [control=['if'], data=['test_key', 'jira_tests_status']]
# Add or update test status
jira_tests_status[test_key] = (test_key, test_status, test_comment, attachments) # depends on [control=['if'], data=[]] |
def get_contents(self):
    """The contents of an alias is the concatenation
    of the content signatures of all its sources."""
    return ''.join(child.get_csig() for child in self.children())
constant[The contents of an alias is the concatenation
of the content signatures of all its sources.]
variable[childsigs] assign[=] <ast.ListComp object at 0x7da2041db400>
return[call[constant[].join, parameter[name[childsigs]]]] | keyword[def] identifier[get_contents] ( identifier[self] ):
literal[string]
identifier[childsigs] =[ identifier[n] . identifier[get_csig] () keyword[for] identifier[n] keyword[in] identifier[self] . identifier[children] ()]
keyword[return] literal[string] . identifier[join] ( identifier[childsigs] ) | def get_contents(self):
"""The contents of an alias is the concatenation
of the content signatures of all its sources."""
childsigs = [n.get_csig() for n in self.children()]
return ''.join(childsigs) |
def get_1st_attr_by_path(self, field_path, **kwargs):
    """
    It returns first value looked up by field path.
    Field path is dot-formatted string path: ``parent_field.child_field``.
    :param field_path: field path. It allows ``*`` as wildcard.
    :type field_path: str
    :param default: Default value if field does not exist.
        If it is not defined :class:`AttributeError` exception will be raised.
    :return: value
    """
    found = self.get_attrs_by_path(field_path, stop_first=True)
    if found is not None:
        return found.pop()
    # Nothing matched: fall back to the caller-supplied default, if any.
    if 'default' in kwargs:
        return kwargs['default']
    raise AttributeError("Field '{0}' does not exist".format(field_path))
constant[
It returns first value looked up by field path.
Field path is dot-formatted string path: ``parent_field.child_field``.
:param field_path: field path. It allows ``*`` as wildcard.
:type field_path: str
:param default: Default value if field does not exist.
If it is not defined :class:`AttributeError` exception will be raised.
:return: value
]
variable[res] assign[=] call[name[self].get_attrs_by_path, parameter[name[field_path]]]
if compare[name[res] is constant[None]] begin[:]
<ast.Try object at 0x7da1b0aa6bc0>
return[call[name[res].pop, parameter[]]] | keyword[def] identifier[get_1st_attr_by_path] ( identifier[self] , identifier[field_path] ,** identifier[kwargs] ):
literal[string]
identifier[res] = identifier[self] . identifier[get_attrs_by_path] ( identifier[field_path] , identifier[stop_first] = keyword[True] )
keyword[if] identifier[res] keyword[is] keyword[None] :
keyword[try] :
keyword[return] identifier[kwargs] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[AttributeError] ( literal[string] . identifier[format] ( identifier[field_path] ))
keyword[return] identifier[res] . identifier[pop] () | def get_1st_attr_by_path(self, field_path, **kwargs):
"""
It returns first value looked up by field path.
Field path is dot-formatted string path: ``parent_field.child_field``.
:param field_path: field path. It allows ``*`` as wildcard.
:type field_path: str
:param default: Default value if field does not exist.
If it is not defined :class:`AttributeError` exception will be raised.
:return: value
"""
res = self.get_attrs_by_path(field_path, stop_first=True)
if res is None:
try:
return kwargs['default'] # depends on [control=['try'], data=[]]
except KeyError:
raise AttributeError("Field '{0}' does not exist".format(field_path)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return res.pop() |
def list_nodes_full(call=None):
    '''
    Return a list of the BareMetal servers that are on the provider.
    :param call: salt-cloud invocation mode; must not be 'action'.
    :raises SaltCloudSystemExit: when invoked as an action instead of a function.
    :return: dict mapping each server name to a dict of all its attributes.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'list_nodes_full must be called with -f or --function'
        )
    items = query(method='servers')
    # Map each server name to a shallow copy of all of its attributes.
    # (The original copied key-by-key in a nested loop; dict(node) is the
    # idiomatic equivalent.)
    return {node['name']: dict(node) for node in items['servers']}
constant[ Return a list of the BareMetal servers that are on the provider.
]
if compare[name[call] equal[==] constant[action]] begin[:]
<ast.Raise object at 0x7da1b2345f30>
variable[items] assign[=] call[name[query], parameter[]]
variable[ret] assign[=] dictionary[[], []]
for taget[name[node]] in starred[call[name[items]][constant[servers]]] begin[:]
call[name[ret]][call[name[node]][constant[name]]] assign[=] dictionary[[], []]
for taget[name[item]] in starred[name[node]] begin[:]
variable[value] assign[=] call[name[node]][name[item]]
call[call[name[ret]][call[name[node]][constant[name]]]][name[item]] assign[=] name[value]
return[name[ret]] | keyword[def] identifier[list_nodes_full] ( identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] == literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
)
identifier[items] = identifier[query] ( identifier[method] = literal[string] )
identifier[ret] ={}
keyword[for] identifier[node] keyword[in] identifier[items] [ literal[string] ]:
identifier[ret] [ identifier[node] [ literal[string] ]]={}
keyword[for] identifier[item] keyword[in] identifier[node] :
identifier[value] = identifier[node] [ identifier[item] ]
identifier[ret] [ identifier[node] [ literal[string] ]][ identifier[item] ]= identifier[value]
keyword[return] identifier[ret] | def list_nodes_full(call=None):
""" Return a list of the BareMetal servers that are on the provider.
"""
if call == 'action':
raise SaltCloudSystemExit('list_nodes_full must be called with -f or --function') # depends on [control=['if'], data=[]]
items = query(method='servers')
# For each server, iterate on its parameters.
ret = {}
for node in items['servers']:
ret[node['name']] = {}
for item in node:
value = node[item]
ret[node['name']][item] = value # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['node']]
return ret |
def prepend_status(func):
    """Prepends the output of `func` with the status."""
    @ft.wraps(func)
    def wrapper(self, *args, **kwargs):
        """Wrapper stub."""
        text = func(self, *args, **kwargs)
        if self.status is StepResult.UNSET:
            return text
        # Prefix the rendered text with the status name, e.g. "[OK]".
        return "[{status}]".format(status=self.status.name) + text
    return wrapper
constant[Prepends the output of `func` with the status.]
def function[wrapper, parameter[self]]:
constant[Wrapper stub.]
variable[res] assign[=] call[name[func], parameter[name[self], <ast.Starred object at 0x7da20c6e55a0>]]
if compare[name[self].status is_not name[StepResult].UNSET] begin[:]
variable[res] assign[=] binary_operation[call[constant[[{status}]].format, parameter[]] + name[res]]
return[name[res]]
return[name[wrapper]] | keyword[def] identifier[prepend_status] ( identifier[func] ):
literal[string]
@ identifier[ft] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[res] = identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[self] . identifier[status] keyword[is] keyword[not] identifier[StepResult] . identifier[UNSET] :
identifier[res] = literal[string] . identifier[format] ( identifier[status] = identifier[self] . identifier[status] . identifier[name] )+ identifier[res]
keyword[return] identifier[res]
keyword[return] identifier[wrapper] | def prepend_status(func):
"""Prepends the output of `func` with the status."""
@ft.wraps(func)
def wrapper(self, *args, **kwargs):
"""Wrapper stub."""
res = func(self, *args, **kwargs)
if self.status is not StepResult.UNSET:
res = '[{status}]'.format(status=self.status.name) + res # depends on [control=['if'], data=[]]
return res
return wrapper |
def extract_units(tokens, start, result):
    """Extract units from bracketed after nu"""
    for element in result:
        # Return a wavenumber units element as soon as any descendant
        # mentions the cm−1 unit string.
        if any('cm−1' in descendant.text for descendant in element.iter()):
            return [E('units', 'cm−1')]
    return []
constant[Extract units from bracketed after nu]
for taget[name[e]] in starred[name[result]] begin[:]
for taget[name[child]] in starred[call[name[e].iter, parameter[]]] begin[:]
if compare[constant[cm−1] in name[child].text] begin[:]
return[list[[<ast.Call object at 0x7da1b138a860>]]]
return[list[[]]] | keyword[def] identifier[extract_units] ( identifier[tokens] , identifier[start] , identifier[result] ):
literal[string]
keyword[for] identifier[e] keyword[in] identifier[result] :
keyword[for] identifier[child] keyword[in] identifier[e] . identifier[iter] ():
keyword[if] literal[string] keyword[in] identifier[child] . identifier[text] :
keyword[return] [ identifier[E] ( literal[string] , literal[string] )]
keyword[return] [] | def extract_units(tokens, start, result):
"""Extract units from bracketed after nu"""
for e in result:
for child in e.iter():
if 'cm−1' in child.text:
return [E('units', 'cm−1')] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['for'], data=['e']]
return [] |
def get_flux(self, energies):
    """Get the total flux of this particle source at the given energies (summed over the components)"""
    per_component = [comp.shape(energies) for comp in self.components.values()]
    # Element-wise sum across components (axis 0).
    return numpy.sum(per_component, 0)
constant[Get the total flux of this particle source at the given energies (summed over the components)]
variable[results] assign[=] <ast.ListComp object at 0x7da18bcc8be0>
return[call[name[numpy].sum, parameter[name[results], constant[0]]]] | keyword[def] identifier[get_flux] ( identifier[self] , identifier[energies] ):
literal[string]
identifier[results] =[ identifier[component] . identifier[shape] ( identifier[energies] ) keyword[for] identifier[component] keyword[in] identifier[self] . identifier[components] . identifier[values] ()]
keyword[return] identifier[numpy] . identifier[sum] ( identifier[results] , literal[int] ) | def get_flux(self, energies):
"""Get the total flux of this particle source at the given energies (summed over the components)"""
results = [component.shape(energies) for component in self.components.values()]
return numpy.sum(results, 0) |
def _require_host_parameter(args, to):
"""
Make sure, that user specified --host argument.
"""
if not args.host:
sys.stderr.write("--host is required parameter to --%s\n" % to)
sys.exit(1) | def function[_require_host_parameter, parameter[args, to]]:
constant[
Make sure, that user specified --host argument.
]
if <ast.UnaryOp object at 0x7da1b09bee90> begin[:]
call[name[sys].stderr.write, parameter[binary_operation[constant[--host is required parameter to --%s
] <ast.Mod object at 0x7da2590d6920> name[to]]]]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[_require_host_parameter] ( identifier[args] , identifier[to] ):
literal[string]
keyword[if] keyword[not] identifier[args] . identifier[host] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[to] )
identifier[sys] . identifier[exit] ( literal[int] ) | def _require_host_parameter(args, to):
"""
Make sure, that user specified --host argument.
"""
if not args.host:
sys.stderr.write('--host is required parameter to --%s\n' % to)
sys.exit(1) # depends on [control=['if'], data=[]] |
def _extract_inner_match(self, candidate, offset):
    """Attempts to extract a match from candidate if the whole candidate
    does not qualify as a match.
    Arguments:
    candidate -- The candidate text that might contain a phone number
    offset -- The current offset of candidate within text
    Returns the match found, None if none can be found
    """
    # _INNER_MATCHES is a module-level collection of patterns; each match's
    # group(1) is treated below as the inner span to retry parsing on.
    for possible_inner_match in _INNER_MATCHES:
        group_match = possible_inner_match.search(candidate)
        is_first_match = True
        # Walk forward through candidate; self._max_tries is a shared budget
        # decremented on every failed parse attempt, bounding total work.
        while group_match and self._max_tries > 0:
            if is_first_match:
                # We should handle any group before this one too.
                group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
                                                     candidate[:group_match.start()])
                match = self._parse_and_verify(group, offset)
                if match is not None:
                    return match
                self._max_tries -= 1
                is_first_match = False
            group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
                                                 group_match.group(1))
            # Shift the reported offset by where group(1) starts inside
            # candidate so the match position stays relative to the full text.
            match = self._parse_and_verify(group, offset + group_match.start(1))
            if match is not None:
                return match
            self._max_tries -= 1
            # Resume searching one character past the current match start so
            # later, possibly overlapping, inner spans are still considered.
            group_match = possible_inner_match.search(candidate, group_match.start() + 1)
    return None
constant[Attempts to extract a match from candidate if the whole candidate
does not qualify as a match.
Arguments:
candidate -- The candidate text that might contain a phone number
offset -- The current offset of candidate within text
Returns the match found, None if none can be found
]
for taget[name[possible_inner_match]] in starred[name[_INNER_MATCHES]] begin[:]
variable[group_match] assign[=] call[name[possible_inner_match].search, parameter[name[candidate]]]
variable[is_first_match] assign[=] constant[True]
while <ast.BoolOp object at 0x7da1b18a1b40> begin[:]
if name[is_first_match] begin[:]
variable[group] assign[=] call[name[self]._trim_after_first_match, parameter[name[_UNWANTED_END_CHAR_PATTERN], call[name[candidate]][<ast.Slice object at 0x7da1b18a2a10>]]]
variable[match] assign[=] call[name[self]._parse_and_verify, parameter[name[group], name[offset]]]
if compare[name[match] is_not constant[None]] begin[:]
return[name[match]]
<ast.AugAssign object at 0x7da1b18a1360>
variable[is_first_match] assign[=] constant[False]
variable[group] assign[=] call[name[self]._trim_after_first_match, parameter[name[_UNWANTED_END_CHAR_PATTERN], call[name[group_match].group, parameter[constant[1]]]]]
variable[match] assign[=] call[name[self]._parse_and_verify, parameter[name[group], binary_operation[name[offset] + call[name[group_match].start, parameter[constant[1]]]]]]
if compare[name[match] is_not constant[None]] begin[:]
return[name[match]]
<ast.AugAssign object at 0x7da1b188d1b0>
variable[group_match] assign[=] call[name[possible_inner_match].search, parameter[name[candidate], binary_operation[call[name[group_match].start, parameter[]] + constant[1]]]]
return[constant[None]] | keyword[def] identifier[_extract_inner_match] ( identifier[self] , identifier[candidate] , identifier[offset] ):
literal[string]
keyword[for] identifier[possible_inner_match] keyword[in] identifier[_INNER_MATCHES] :
identifier[group_match] = identifier[possible_inner_match] . identifier[search] ( identifier[candidate] )
identifier[is_first_match] = keyword[True]
keyword[while] identifier[group_match] keyword[and] identifier[self] . identifier[_max_tries] > literal[int] :
keyword[if] identifier[is_first_match] :
identifier[group] = identifier[self] . identifier[_trim_after_first_match] ( identifier[_UNWANTED_END_CHAR_PATTERN] ,
identifier[candidate] [: identifier[group_match] . identifier[start] ()])
identifier[match] = identifier[self] . identifier[_parse_and_verify] ( identifier[group] , identifier[offset] )
keyword[if] identifier[match] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[match]
identifier[self] . identifier[_max_tries] -= literal[int]
identifier[is_first_match] = keyword[False]
identifier[group] = identifier[self] . identifier[_trim_after_first_match] ( identifier[_UNWANTED_END_CHAR_PATTERN] ,
identifier[group_match] . identifier[group] ( literal[int] ))
identifier[match] = identifier[self] . identifier[_parse_and_verify] ( identifier[group] , identifier[offset] + identifier[group_match] . identifier[start] ( literal[int] ))
keyword[if] identifier[match] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[match]
identifier[self] . identifier[_max_tries] -= literal[int]
identifier[group_match] = identifier[possible_inner_match] . identifier[search] ( identifier[candidate] , identifier[group_match] . identifier[start] ()+ literal[int] )
keyword[return] keyword[None] | def _extract_inner_match(self, candidate, offset):
"""Attempts to extract a match from candidate if the whole candidate
does not qualify as a match.
Arguments:
candidate -- The candidate text that might contain a phone number
offset -- The current offset of candidate within text
Returns the match found, None if none can be found
"""
for possible_inner_match in _INNER_MATCHES:
group_match = possible_inner_match.search(candidate)
is_first_match = True
while group_match and self._max_tries > 0:
if is_first_match:
# We should handle any group before this one too.
group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN, candidate[:group_match.start()])
match = self._parse_and_verify(group, offset)
if match is not None:
return match # depends on [control=['if'], data=['match']]
self._max_tries -= 1
is_first_match = False # depends on [control=['if'], data=[]]
group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN, group_match.group(1))
match = self._parse_and_verify(group, offset + group_match.start(1))
if match is not None:
return match # depends on [control=['if'], data=['match']]
self._max_tries -= 1
group_match = possible_inner_match.search(candidate, group_match.start() + 1) # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['possible_inner_match']]
return None |
def parse_param_signature(sig):
    """ Parse a parameter signature of the form: type name (= default)? """
    match = PARAM_SIG_RE.match(sig.strip())
    if match is None:
        raise RuntimeError('Parameter signature invalid, got ' + sig)
    groups = match.groups()
    # The trailing four groups are type, name, '=' (unused) and default;
    # the first group collects the space-separated modifier keywords.
    typ, name, _, default = groups[-4:]
    return ParamTuple(name=name, typ=typ,
                      default=default, modifiers=groups[0].split())
constant[ Parse a parameter signature of the form: type name (= default)? ]
variable[match] assign[=] call[name[PARAM_SIG_RE].match, parameter[call[name[sig].strip, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b0fb8f40> begin[:]
<ast.Raise object at 0x7da1b0fb8970>
variable[groups] assign[=] call[name[match].groups, parameter[]]
variable[modifiers] assign[=] call[call[name[groups]][constant[0]].split, parameter[]]
<ast.Tuple object at 0x7da1b0fbaaa0> assign[=] call[name[groups]][<ast.Slice object at 0x7da1b0fb90f0>]
return[call[name[ParamTuple], parameter[]]] | keyword[def] identifier[parse_param_signature] ( identifier[sig] ):
literal[string]
identifier[match] = identifier[PARAM_SIG_RE] . identifier[match] ( identifier[sig] . identifier[strip] ())
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[RuntimeError] ( literal[string] + identifier[sig] )
identifier[groups] = identifier[match] . identifier[groups] ()
identifier[modifiers] = identifier[groups] [ literal[int] ]. identifier[split] ()
identifier[typ] , identifier[name] , identifier[_] , identifier[default] = identifier[groups] [- literal[int] :]
keyword[return] identifier[ParamTuple] ( identifier[name] = identifier[name] , identifier[typ] = identifier[typ] ,
identifier[default] = identifier[default] , identifier[modifiers] = identifier[modifiers] ) | def parse_param_signature(sig):
""" Parse a parameter signature of the form: type name (= default)? """
match = PARAM_SIG_RE.match(sig.strip())
if not match:
raise RuntimeError('Parameter signature invalid, got ' + sig) # depends on [control=['if'], data=[]]
groups = match.groups()
modifiers = groups[0].split()
(typ, name, _, default) = groups[-4:]
return ParamTuple(name=name, typ=typ, default=default, modifiers=modifiers) |
def get_pmids(self):
    """Get list of all PMIDs associated with edges in the network."""
    collected = set()
    for attrs in self._edge_attributes.values():
        # Edges without a 'pmids' entry (or with an empty one) are skipped.
        collected.update(attrs.get('pmids') or [])
    return list(collected)
constant[Get list of all PMIDs associated with edges in the network.]
variable[pmids] assign[=] list[[]]
for taget[name[ea]] in starred[call[name[self]._edge_attributes.values, parameter[]]] begin[:]
variable[edge_pmids] assign[=] call[name[ea].get, parameter[constant[pmids]]]
if name[edge_pmids] begin[:]
<ast.AugAssign object at 0x7da18f00c400>
return[call[name[list], parameter[call[name[set], parameter[name[pmids]]]]]] | keyword[def] identifier[get_pmids] ( identifier[self] ):
literal[string]
identifier[pmids] =[]
keyword[for] identifier[ea] keyword[in] identifier[self] . identifier[_edge_attributes] . identifier[values] ():
identifier[edge_pmids] = identifier[ea] . identifier[get] ( literal[string] )
keyword[if] identifier[edge_pmids] :
identifier[pmids] += identifier[edge_pmids]
keyword[return] identifier[list] ( identifier[set] ( identifier[pmids] )) | def get_pmids(self):
"""Get list of all PMIDs associated with edges in the network."""
pmids = []
for ea in self._edge_attributes.values():
edge_pmids = ea.get('pmids')
if edge_pmids:
pmids += edge_pmids # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ea']]
return list(set(pmids)) |
def get_local_extrema(self, find_min=True, threshold_frac=None,
                      threshold_abs=None):
    """
    Get all local extrema fractional coordinates in charge density,
    searching for local minimum by default. Note that sites are NOT grouped
    symmetrically.
    Args:
        find_min (bool): True to find local minima, False to find local
            maxima.
        threshold_frac (float): optional fraction of extrema shown, which
            returns `threshold_frac * tot_num_extrema` extrema fractional
            coordinates based on highest/lowest intensity.
            E.g. set 0.2 to show the extrema with 20% highest or lowest
            intensity. Value range: 0 <= threshold_frac <= 1.
            Note that threshold_abs and threshold_frac should not be set at
            the same time.
        threshold_abs (float): optional filter. When searching for local
            minima, intensity <= threshold_abs returns; when searching for
            local maxima, intensity >= threshold_abs returns.
            Note that threshold_abs and threshold_frac should not be set at
            the same time.
    Returns:
        extrema_coords (list): list of fractional coordinates corresponding
            to local extrema.
    """
    sign, extrema_type = 1, "local maxima"
    if find_min:
        # The peak finder below locates maxima only, so negate the data to
        # turn charge-density minima into maxima of the searched array.
        sign, extrema_type = -1, "local minima"
    # Make 3x3x3 supercell
    # This is a trick to resolve the periodical boundary issue.
    total_chg = sign * self.chgcar.data["total"]
    total_chg = np.tile(total_chg, reps=(3, 3, 3))
    # NOTE(review): peak_local_max presumably comes from skimage.feature and
    # returns integer grid indices of the local maxima — confirm the import.
    coordinates = peak_local_max(total_chg, min_distance=1)
    # Remove duplicated sites introduced by supercell.
    # Grid index / supercell shape gives a fraction of the 3x3x3 cell; the
    # factor 3 rescales it to units of the original cell (range 0..3).
    f_coords = [coord / total_chg.shape * 3 for coord in coordinates]
    # Keep only points in the central image [1, 2) and shift them back into
    # the original cell's [0, 1) fractional range.
    f_coords = [f - 1 for f in f_coords if
                all(np.array(f) < 2) and all(np.array(f) >= 1)]
    # Update information
    self._update_extrema(f_coords, extrema_type,
                         threshold_frac=threshold_frac,
                         threshold_abs=threshold_abs)
    return self.extrema_coords
constant[
Get all local extrema fractional coordinates in charge density,
searching for local minimum by default. Note that sites are NOT grouped
symmetrically.
Args:
find_min (bool): True to find local minimum else maximum, otherwise
find local maximum.
threshold_frac (float): optional fraction of extrema shown, which
returns `threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to show the extrema with 20% highest or lowest
intensity. Value range: 0 <= threshold_frac <= 1
Note that threshold_abs and threshold_frac should not set in the
same time.
threshold_abs (float): optional filter. When searching for local
minima, intensity <= threshold_abs returns; when searching for
local maxima, intensity >= threshold_abs returns.
Note that threshold_abs and threshold_frac should not set in the
same time.
Returns:
extrema_coords (list): list of fractional coordinates corresponding
to local extrema.
]
<ast.Tuple object at 0x7da2047ebca0> assign[=] tuple[[<ast.Constant object at 0x7da2047e8400>, <ast.Constant object at 0x7da2047e9000>]]
if name[find_min] begin[:]
<ast.Tuple object at 0x7da2047e81f0> assign[=] tuple[[<ast.UnaryOp object at 0x7da2047e8580>, <ast.Constant object at 0x7da2047e8a00>]]
variable[total_chg] assign[=] binary_operation[name[sign] * call[name[self].chgcar.data][constant[total]]]
variable[total_chg] assign[=] call[name[np].tile, parameter[name[total_chg]]]
variable[coordinates] assign[=] call[name[peak_local_max], parameter[name[total_chg]]]
variable[f_coords] assign[=] <ast.ListComp object at 0x7da20e956dd0>
variable[f_coords] assign[=] <ast.ListComp object at 0x7da20e957220>
call[name[self]._update_extrema, parameter[name[f_coords], name[extrema_type]]]
return[name[self].extrema_coords] | keyword[def] identifier[get_local_extrema] ( identifier[self] , identifier[find_min] = keyword[True] , identifier[threshold_frac] = keyword[None] ,
identifier[threshold_abs] = keyword[None] ):
literal[string]
identifier[sign] , identifier[extrema_type] = literal[int] , literal[string]
keyword[if] identifier[find_min] :
identifier[sign] , identifier[extrema_type] =- literal[int] , literal[string]
identifier[total_chg] = identifier[sign] * identifier[self] . identifier[chgcar] . identifier[data] [ literal[string] ]
identifier[total_chg] = identifier[np] . identifier[tile] ( identifier[total_chg] , identifier[reps] =( literal[int] , literal[int] , literal[int] ))
identifier[coordinates] = identifier[peak_local_max] ( identifier[total_chg] , identifier[min_distance] = literal[int] )
identifier[f_coords] =[ identifier[coord] / identifier[total_chg] . identifier[shape] * literal[int] keyword[for] identifier[coord] keyword[in] identifier[coordinates] ]
identifier[f_coords] =[ identifier[f] - literal[int] keyword[for] identifier[f] keyword[in] identifier[f_coords] keyword[if]
identifier[all] ( identifier[np] . identifier[array] ( identifier[f] )< literal[int] ) keyword[and] identifier[all] ( identifier[np] . identifier[array] ( identifier[f] )>= literal[int] )]
identifier[self] . identifier[_update_extrema] ( identifier[f_coords] , identifier[extrema_type] ,
identifier[threshold_frac] = identifier[threshold_frac] ,
identifier[threshold_abs] = identifier[threshold_abs] )
keyword[return] identifier[self] . identifier[extrema_coords] | def get_local_extrema(self, find_min=True, threshold_frac=None, threshold_abs=None):
"""
Get all local extrema fractional coordinates in charge density,
searching for local minimum by default. Note that sites are NOT grouped
symmetrically.
Args:
find_min (bool): True to find local minimum else maximum, otherwise
find local maximum.
threshold_frac (float): optional fraction of extrema shown, which
returns `threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to show the extrema with 20% highest or lowest
intensity. Value range: 0 <= threshold_frac <= 1
Note that threshold_abs and threshold_frac should not set in the
same time.
threshold_abs (float): optional filter. When searching for local
minima, intensity <= threshold_abs returns; when searching for
local maxima, intensity >= threshold_abs returns.
Note that threshold_abs and threshold_frac should not set in the
same time.
Returns:
extrema_coords (list): list of fractional coordinates corresponding
to local extrema.
"""
(sign, extrema_type) = (1, 'local maxima')
if find_min:
(sign, extrema_type) = (-1, 'local minima') # depends on [control=['if'], data=[]]
# Make 3x3x3 supercell
# This is a trick to resolve the periodical boundary issue.
total_chg = sign * self.chgcar.data['total']
total_chg = np.tile(total_chg, reps=(3, 3, 3))
coordinates = peak_local_max(total_chg, min_distance=1)
# Remove duplicated sites introduced by supercell.
f_coords = [coord / total_chg.shape * 3 for coord in coordinates]
f_coords = [f - 1 for f in f_coords if all(np.array(f) < 2) and all(np.array(f) >= 1)]
# Update information
self._update_extrema(f_coords, extrema_type, threshold_frac=threshold_frac, threshold_abs=threshold_abs)
return self.extrema_coords |
def integrate_angular_velocity(Omega, t0, t1, R0=None, tolerance=1e-12):
"""Compute frame with given angular velocity
Parameters
==========
Omega: tuple or callable
Angular velocity from which to compute frame. Can be
1) a 2-tuple of float arrays (t, v) giving the angular velocity vector at a series of times,
2) a function of time that returns the 3-vector angular velocity, or
3) a function of time and orientation (t, R) that returns the 3-vector angular velocity
In case 1, the angular velocity will be interpolated to the required times. Note that accuracy
is poor in case 1.
t0: float
Initial time
t1: float
Final time
R0: quaternion, optional
Initial frame orientation. Defaults to 1 (the identity orientation).
tolerance: float, optional
Absolute tolerance used in integration. Defaults to 1e-12.
Returns
=======
t: float array
R: quaternion array
"""
import warnings
from scipy.integrate import ode
if R0 is None:
R0 = quaternion.one
input_is_tabulated = False
try:
t_Omega, v = Omega
from scipy.interpolate import InterpolatedUnivariateSpline
Omega_x = InterpolatedUnivariateSpline(t_Omega, v[:, 0])
Omega_y = InterpolatedUnivariateSpline(t_Omega, v[:, 1])
Omega_z = InterpolatedUnivariateSpline(t_Omega, v[:, 2])
def Omega_func(t, R):
return [Omega_x(t), Omega_y(t), Omega_z(t)]
Omega_func(t0, R0)
input_is_tabulated = True
except (TypeError, ValueError):
def Omega_func(t, R):
return Omega(t, R)
try:
Omega_func(t0, R0)
except TypeError:
def Omega_func(t, R):
return Omega(t)
Omega_func(t0, R0)
def RHS(t, y):
R = quaternion.quaternion(*y)
return (0.5 * quaternion.quaternion(0.0, *Omega_func(t, R)) * R).components
y0 = R0.components
if input_is_tabulated:
from scipy.integrate import solve_ivp
t = t_Omega
t_span = [t_Omega[0], t_Omega[-1]]
solution = solve_ivp(RHS, t_span, y0, t_eval=t_Omega, atol=tolerance, rtol=100*np.finfo(float).eps)
R = quaternion.from_float_array(solution.y.T)
else:
solver = ode(RHS)
solver.set_initial_value(y0, t0)
solver.set_integrator('dop853', nsteps=1, atol=tolerance, rtol=0.0)
solver._integrator.iwork[2] = -1 # suppress Fortran-printed warning
t = appending_array((int(t1-t0),))
t.append(solver.t)
R = appending_array((int(t1-t0), 4))
R.append(solver.y)
warnings.filterwarnings("ignore", category=UserWarning)
t_last = solver.t
while solver.t < t1:
solver.integrate(t1, step=True)
if solver.t > t_last:
t.append(solver.t)
R.append(solver.y)
t_last = solver.t
warnings.resetwarnings()
t = t.a
R = quaternion.as_quat_array(R.a)
return t, R | def function[integrate_angular_velocity, parameter[Omega, t0, t1, R0, tolerance]]:
constant[Compute frame with given angular velocity
Parameters
==========
Omega: tuple or callable
Angular velocity from which to compute frame. Can be
1) a 2-tuple of float arrays (t, v) giving the angular velocity vector at a series of times,
2) a function of time that returns the 3-vector angular velocity, or
3) a function of time and orientation (t, R) that returns the 3-vector angular velocity
In case 1, the angular velocity will be interpolated to the required times. Note that accuracy
is poor in case 1.
t0: float
Initial time
t1: float
Final time
R0: quaternion, optional
Initial frame orientation. Defaults to 1 (the identity orientation).
tolerance: float, optional
Absolute tolerance used in integration. Defaults to 1e-12.
Returns
=======
t: float array
R: quaternion array
]
import module[warnings]
from relative_module[scipy.integrate] import module[ode]
if compare[name[R0] is constant[None]] begin[:]
variable[R0] assign[=] name[quaternion].one
variable[input_is_tabulated] assign[=] constant[False]
<ast.Try object at 0x7da1b1d567a0>
def function[RHS, parameter[t, y]]:
variable[R] assign[=] call[name[quaternion].quaternion, parameter[<ast.Starred object at 0x7da20c7968c0>]]
return[binary_operation[binary_operation[constant[0.5] * call[name[quaternion].quaternion, parameter[constant[0.0], <ast.Starred object at 0x7da18eb55a20>]]] * name[R]].components]
variable[y0] assign[=] name[R0].components
if name[input_is_tabulated] begin[:]
from relative_module[scipy.integrate] import module[solve_ivp]
variable[t] assign[=] name[t_Omega]
variable[t_span] assign[=] list[[<ast.Subscript object at 0x7da18eb55f00>, <ast.Subscript object at 0x7da1b1eebac0>]]
variable[solution] assign[=] call[name[solve_ivp], parameter[name[RHS], name[t_span], name[y0]]]
variable[R] assign[=] call[name[quaternion].from_float_array, parameter[name[solution].y.T]]
return[tuple[[<ast.Name object at 0x7da1b1ee99c0>, <ast.Name object at 0x7da1b1ee81f0>]]] | keyword[def] identifier[integrate_angular_velocity] ( identifier[Omega] , identifier[t0] , identifier[t1] , identifier[R0] = keyword[None] , identifier[tolerance] = literal[int] ):
literal[string]
keyword[import] identifier[warnings]
keyword[from] identifier[scipy] . identifier[integrate] keyword[import] identifier[ode]
keyword[if] identifier[R0] keyword[is] keyword[None] :
identifier[R0] = identifier[quaternion] . identifier[one]
identifier[input_is_tabulated] = keyword[False]
keyword[try] :
identifier[t_Omega] , identifier[v] = identifier[Omega]
keyword[from] identifier[scipy] . identifier[interpolate] keyword[import] identifier[InterpolatedUnivariateSpline]
identifier[Omega_x] = identifier[InterpolatedUnivariateSpline] ( identifier[t_Omega] , identifier[v] [:, literal[int] ])
identifier[Omega_y] = identifier[InterpolatedUnivariateSpline] ( identifier[t_Omega] , identifier[v] [:, literal[int] ])
identifier[Omega_z] = identifier[InterpolatedUnivariateSpline] ( identifier[t_Omega] , identifier[v] [:, literal[int] ])
keyword[def] identifier[Omega_func] ( identifier[t] , identifier[R] ):
keyword[return] [ identifier[Omega_x] ( identifier[t] ), identifier[Omega_y] ( identifier[t] ), identifier[Omega_z] ( identifier[t] )]
identifier[Omega_func] ( identifier[t0] , identifier[R0] )
identifier[input_is_tabulated] = keyword[True]
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[def] identifier[Omega_func] ( identifier[t] , identifier[R] ):
keyword[return] identifier[Omega] ( identifier[t] , identifier[R] )
keyword[try] :
identifier[Omega_func] ( identifier[t0] , identifier[R0] )
keyword[except] identifier[TypeError] :
keyword[def] identifier[Omega_func] ( identifier[t] , identifier[R] ):
keyword[return] identifier[Omega] ( identifier[t] )
identifier[Omega_func] ( identifier[t0] , identifier[R0] )
keyword[def] identifier[RHS] ( identifier[t] , identifier[y] ):
identifier[R] = identifier[quaternion] . identifier[quaternion] (* identifier[y] )
keyword[return] ( literal[int] * identifier[quaternion] . identifier[quaternion] ( literal[int] ,* identifier[Omega_func] ( identifier[t] , identifier[R] ))* identifier[R] ). identifier[components]
identifier[y0] = identifier[R0] . identifier[components]
keyword[if] identifier[input_is_tabulated] :
keyword[from] identifier[scipy] . identifier[integrate] keyword[import] identifier[solve_ivp]
identifier[t] = identifier[t_Omega]
identifier[t_span] =[ identifier[t_Omega] [ literal[int] ], identifier[t_Omega] [- literal[int] ]]
identifier[solution] = identifier[solve_ivp] ( identifier[RHS] , identifier[t_span] , identifier[y0] , identifier[t_eval] = identifier[t_Omega] , identifier[atol] = identifier[tolerance] , identifier[rtol] = literal[int] * identifier[np] . identifier[finfo] ( identifier[float] ). identifier[eps] )
identifier[R] = identifier[quaternion] . identifier[from_float_array] ( identifier[solution] . identifier[y] . identifier[T] )
keyword[else] :
identifier[solver] = identifier[ode] ( identifier[RHS] )
identifier[solver] . identifier[set_initial_value] ( identifier[y0] , identifier[t0] )
identifier[solver] . identifier[set_integrator] ( literal[string] , identifier[nsteps] = literal[int] , identifier[atol] = identifier[tolerance] , identifier[rtol] = literal[int] )
identifier[solver] . identifier[_integrator] . identifier[iwork] [ literal[int] ]=- literal[int]
identifier[t] = identifier[appending_array] (( identifier[int] ( identifier[t1] - identifier[t0] ),))
identifier[t] . identifier[append] ( identifier[solver] . identifier[t] )
identifier[R] = identifier[appending_array] (( identifier[int] ( identifier[t1] - identifier[t0] ), literal[int] ))
identifier[R] . identifier[append] ( identifier[solver] . identifier[y] )
identifier[warnings] . identifier[filterwarnings] ( literal[string] , identifier[category] = identifier[UserWarning] )
identifier[t_last] = identifier[solver] . identifier[t]
keyword[while] identifier[solver] . identifier[t] < identifier[t1] :
identifier[solver] . identifier[integrate] ( identifier[t1] , identifier[step] = keyword[True] )
keyword[if] identifier[solver] . identifier[t] > identifier[t_last] :
identifier[t] . identifier[append] ( identifier[solver] . identifier[t] )
identifier[R] . identifier[append] ( identifier[solver] . identifier[y] )
identifier[t_last] = identifier[solver] . identifier[t]
identifier[warnings] . identifier[resetwarnings] ()
identifier[t] = identifier[t] . identifier[a]
identifier[R] = identifier[quaternion] . identifier[as_quat_array] ( identifier[R] . identifier[a] )
keyword[return] identifier[t] , identifier[R] | def integrate_angular_velocity(Omega, t0, t1, R0=None, tolerance=1e-12):
"""Compute frame with given angular velocity
Parameters
==========
Omega: tuple or callable
Angular velocity from which to compute frame. Can be
1) a 2-tuple of float arrays (t, v) giving the angular velocity vector at a series of times,
2) a function of time that returns the 3-vector angular velocity, or
3) a function of time and orientation (t, R) that returns the 3-vector angular velocity
In case 1, the angular velocity will be interpolated to the required times. Note that accuracy
is poor in case 1.
t0: float
Initial time
t1: float
Final time
R0: quaternion, optional
Initial frame orientation. Defaults to 1 (the identity orientation).
tolerance: float, optional
Absolute tolerance used in integration. Defaults to 1e-12.
Returns
=======
t: float array
R: quaternion array
"""
import warnings
from scipy.integrate import ode
if R0 is None:
R0 = quaternion.one # depends on [control=['if'], data=['R0']]
input_is_tabulated = False
try:
(t_Omega, v) = Omega
from scipy.interpolate import InterpolatedUnivariateSpline
Omega_x = InterpolatedUnivariateSpline(t_Omega, v[:, 0])
Omega_y = InterpolatedUnivariateSpline(t_Omega, v[:, 1])
Omega_z = InterpolatedUnivariateSpline(t_Omega, v[:, 2])
def Omega_func(t, R):
return [Omega_x(t), Omega_y(t), Omega_z(t)]
Omega_func(t0, R0)
input_is_tabulated = True # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
def Omega_func(t, R):
return Omega(t, R)
try:
Omega_func(t0, R0) # depends on [control=['try'], data=[]]
except TypeError:
def Omega_func(t, R):
return Omega(t)
Omega_func(t0, R0) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
def RHS(t, y):
R = quaternion.quaternion(*y)
return (0.5 * quaternion.quaternion(0.0, *Omega_func(t, R)) * R).components
y0 = R0.components
if input_is_tabulated:
from scipy.integrate import solve_ivp
t = t_Omega
t_span = [t_Omega[0], t_Omega[-1]]
solution = solve_ivp(RHS, t_span, y0, t_eval=t_Omega, atol=tolerance, rtol=100 * np.finfo(float).eps)
R = quaternion.from_float_array(solution.y.T) # depends on [control=['if'], data=[]]
else:
solver = ode(RHS)
solver.set_initial_value(y0, t0)
solver.set_integrator('dop853', nsteps=1, atol=tolerance, rtol=0.0)
solver._integrator.iwork[2] = -1 # suppress Fortran-printed warning
t = appending_array((int(t1 - t0),))
t.append(solver.t)
R = appending_array((int(t1 - t0), 4))
R.append(solver.y)
warnings.filterwarnings('ignore', category=UserWarning)
t_last = solver.t
while solver.t < t1:
solver.integrate(t1, step=True)
if solver.t > t_last:
t.append(solver.t)
R.append(solver.y)
t_last = solver.t # depends on [control=['if'], data=['t_last']] # depends on [control=['while'], data=['t1']]
warnings.resetwarnings()
t = t.a
R = quaternion.as_quat_array(R.a)
return (t, R) |
def pull(self, conf, ignore_missing=False):
"""Push this image"""
with Builder().remove_replaced_images(conf):
self.push_or_pull(conf, "pull", ignore_missing=ignore_missing) | def function[pull, parameter[self, conf, ignore_missing]]:
constant[Push this image]
with call[call[name[Builder], parameter[]].remove_replaced_images, parameter[name[conf]]] begin[:]
call[name[self].push_or_pull, parameter[name[conf], constant[pull]]] | keyword[def] identifier[pull] ( identifier[self] , identifier[conf] , identifier[ignore_missing] = keyword[False] ):
literal[string]
keyword[with] identifier[Builder] (). identifier[remove_replaced_images] ( identifier[conf] ):
identifier[self] . identifier[push_or_pull] ( identifier[conf] , literal[string] , identifier[ignore_missing] = identifier[ignore_missing] ) | def pull(self, conf, ignore_missing=False):
"""Push this image"""
with Builder().remove_replaced_images(conf):
self.push_or_pull(conf, 'pull', ignore_missing=ignore_missing) # depends on [control=['with'], data=[]] |
def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
variability, allchannels_epochs = ts.epochs_distributed(
variability, threshold, minlength, plot=False)
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
allchannels_epochs = [allchannels_epochs]
variability = variability[:, np.newaxis]
channels = ts.shape[1]
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
starts = [(e[0], 1) for channel in allchannels_epochs for e in channel]
ends = [(e[1], -1) for channel in allchannels_epochs for e in channel]
all = sorted(starts + ends)
joint_epochs = []
in_joint_epoch = False
joint_start = 0.0
inside_count = 0
for bound in all:
inside_count += bound[1]
if not in_joint_epoch and 1.0*inside_count/channels >= proportion:
in_joint_epoch = True
joint_start = bound[0]
if in_joint_epoch and 1.0*inside_count/channels < proportion:
in_joint_epoch = False
joint_end = bound[0]
if (joint_end - joint_start)*dt >= minlength:
joint_epochs.append((joint_start, joint_end))
if plot:
joint_epochs_repeated = [joint_epochs] * channels
_plot_variability(ts, variability, threshold, joint_epochs_repeated)
return (variability, joint_epochs) | def function[epochs_joint, parameter[ts, variability, threshold, minlength, proportion, plot]]:
constant[Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
]
<ast.Tuple object at 0x7da204622860> assign[=] call[name[ts].epochs_distributed, parameter[name[variability], name[threshold], name[minlength]]]
variable[orig_ndim] assign[=] name[ts].ndim
if compare[name[ts].ndim is constant[1]] begin[:]
variable[ts] assign[=] call[name[ts]][tuple[[<ast.Slice object at 0x7da18bcc9810>, <ast.Attribute object at 0x7da18bcc9060>]]]
variable[allchannels_epochs] assign[=] list[[<ast.Name object at 0x7da18f00c9d0>]]
variable[variability] assign[=] call[name[variability]][tuple[[<ast.Slice object at 0x7da18f00f850>, <ast.Attribute object at 0x7da18f00f460>]]]
variable[channels] assign[=] call[name[ts].shape][constant[1]]
variable[dt] assign[=] binary_operation[binary_operation[binary_operation[constant[1.0] * call[name[ts].tspan][<ast.UnaryOp object at 0x7da18f00ee30>]] - call[name[ts].tspan][constant[0]]] / binary_operation[call[name[len], parameter[name[ts]]] - constant[1]]]
variable[starts] assign[=] <ast.ListComp object at 0x7da18f00fe20>
variable[ends] assign[=] <ast.ListComp object at 0x7da2054a5300>
variable[all] assign[=] call[name[sorted], parameter[binary_operation[name[starts] + name[ends]]]]
variable[joint_epochs] assign[=] list[[]]
variable[in_joint_epoch] assign[=] constant[False]
variable[joint_start] assign[=] constant[0.0]
variable[inside_count] assign[=] constant[0]
for taget[name[bound]] in starred[name[all]] begin[:]
<ast.AugAssign object at 0x7da2054a6d10>
if <ast.BoolOp object at 0x7da2054a6050> begin[:]
variable[in_joint_epoch] assign[=] constant[True]
variable[joint_start] assign[=] call[name[bound]][constant[0]]
if <ast.BoolOp object at 0x7da2054a4c70> begin[:]
variable[in_joint_epoch] assign[=] constant[False]
variable[joint_end] assign[=] call[name[bound]][constant[0]]
if compare[binary_operation[binary_operation[name[joint_end] - name[joint_start]] * name[dt]] greater_or_equal[>=] name[minlength]] begin[:]
call[name[joint_epochs].append, parameter[tuple[[<ast.Name object at 0x7da2054a7a30>, <ast.Name object at 0x7da2054a49d0>]]]]
if name[plot] begin[:]
variable[joint_epochs_repeated] assign[=] binary_operation[list[[<ast.Name object at 0x7da2054a58d0>]] * name[channels]]
call[name[_plot_variability], parameter[name[ts], name[variability], name[threshold], name[joint_epochs_repeated]]]
return[tuple[[<ast.Name object at 0x7da2054a54e0>, <ast.Name object at 0x7da2054a5450>]]] | keyword[def] identifier[epochs_joint] ( identifier[ts] , identifier[variability] = keyword[None] , identifier[threshold] = literal[int] , identifier[minlength] = literal[int] ,
identifier[proportion] = literal[int] , identifier[plot] = keyword[True] ):
literal[string]
identifier[variability] , identifier[allchannels_epochs] = identifier[ts] . identifier[epochs_distributed] (
identifier[variability] , identifier[threshold] , identifier[minlength] , identifier[plot] = keyword[False] )
identifier[orig_ndim] = identifier[ts] . identifier[ndim]
keyword[if] identifier[ts] . identifier[ndim] keyword[is] literal[int] :
identifier[ts] = identifier[ts] [:, identifier[np] . identifier[newaxis] ]
identifier[allchannels_epochs] =[ identifier[allchannels_epochs] ]
identifier[variability] = identifier[variability] [:, identifier[np] . identifier[newaxis] ]
identifier[channels] = identifier[ts] . identifier[shape] [ literal[int] ]
identifier[dt] =( literal[int] * identifier[ts] . identifier[tspan] [- literal[int] ]- identifier[ts] . identifier[tspan] [ literal[int] ])/( identifier[len] ( identifier[ts] )- literal[int] )
identifier[starts] =[( identifier[e] [ literal[int] ], literal[int] ) keyword[for] identifier[channel] keyword[in] identifier[allchannels_epochs] keyword[for] identifier[e] keyword[in] identifier[channel] ]
identifier[ends] =[( identifier[e] [ literal[int] ],- literal[int] ) keyword[for] identifier[channel] keyword[in] identifier[allchannels_epochs] keyword[for] identifier[e] keyword[in] identifier[channel] ]
identifier[all] = identifier[sorted] ( identifier[starts] + identifier[ends] )
identifier[joint_epochs] =[]
identifier[in_joint_epoch] = keyword[False]
identifier[joint_start] = literal[int]
identifier[inside_count] = literal[int]
keyword[for] identifier[bound] keyword[in] identifier[all] :
identifier[inside_count] += identifier[bound] [ literal[int] ]
keyword[if] keyword[not] identifier[in_joint_epoch] keyword[and] literal[int] * identifier[inside_count] / identifier[channels] >= identifier[proportion] :
identifier[in_joint_epoch] = keyword[True]
identifier[joint_start] = identifier[bound] [ literal[int] ]
keyword[if] identifier[in_joint_epoch] keyword[and] literal[int] * identifier[inside_count] / identifier[channels] < identifier[proportion] :
identifier[in_joint_epoch] = keyword[False]
identifier[joint_end] = identifier[bound] [ literal[int] ]
keyword[if] ( identifier[joint_end] - identifier[joint_start] )* identifier[dt] >= identifier[minlength] :
identifier[joint_epochs] . identifier[append] (( identifier[joint_start] , identifier[joint_end] ))
keyword[if] identifier[plot] :
identifier[joint_epochs_repeated] =[ identifier[joint_epochs] ]* identifier[channels]
identifier[_plot_variability] ( identifier[ts] , identifier[variability] , identifier[threshold] , identifier[joint_epochs_repeated] )
keyword[return] ( identifier[variability] , identifier[joint_epochs] ) | def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0, proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
(variability, allchannels_epochs) = ts.epochs_distributed(variability, threshold, minlength, plot=False)
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
allchannels_epochs = [allchannels_epochs]
variability = variability[:, np.newaxis] # depends on [control=['if'], data=[]]
channels = ts.shape[1]
dt = (1.0 * ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
starts = [(e[0], 1) for channel in allchannels_epochs for e in channel]
ends = [(e[1], -1) for channel in allchannels_epochs for e in channel]
all = sorted(starts + ends)
joint_epochs = []
in_joint_epoch = False
joint_start = 0.0
inside_count = 0
for bound in all:
inside_count += bound[1]
if not in_joint_epoch and 1.0 * inside_count / channels >= proportion:
in_joint_epoch = True
joint_start = bound[0] # depends on [control=['if'], data=[]]
if in_joint_epoch and 1.0 * inside_count / channels < proportion:
in_joint_epoch = False
joint_end = bound[0]
if (joint_end - joint_start) * dt >= minlength:
joint_epochs.append((joint_start, joint_end)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['bound']]
if plot:
joint_epochs_repeated = [joint_epochs] * channels
_plot_variability(ts, variability, threshold, joint_epochs_repeated) # depends on [control=['if'], data=[]]
return (variability, joint_epochs) |
def __compress_attributes(self, dic):
"""
This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always return lists, even when it doesn
t make sense.
:param dic:
:return:
"""
result = {}
for k, v in dic.iteritems():
if isinstance(v, types.ListType) and len(v) == 1:
if k not in ('msExchMailboxSecurityDescriptor', 'msExchSafeSendersHash', 'msExchBlockedSendersHash',
'replicationSignature', 'msExchSafeRecipientsHash', 'sIDHistory',
'msRTCSIP-UserRoutingGroupId', 'mSMQDigests', 'mSMQSignCertificates',
'msExchMasterAccountSid', 'msExchPreviousAccountSid', 'msExchUMPinChecksum',
'userSMIMECertificate', 'userCertificate', 'userCert',
'msExchDisabledArchiveGUID', 'msExchUMPinChecksum', 'msExchUMSpokenName',
'objectSid', 'objectGUID', 'msExchArchiveGUID', 'thumbnailPhoto', 'msExchMailboxGuid'):
try:
result[k] = v[0].decode('utf-8')
except Exception as e:
logging. error("Failed to decode attribute: %s -- %s" % (k, e))
result[k] = v[0]
return result | def function[__compress_attributes, parameter[self, dic]]:
constant[
This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always return lists, even when it doesn
t make sense.
:param dic:
:return:
]
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b2847e80>, <ast.Name object at 0x7da1b2847a90>]]] in starred[call[name[dic].iteritems, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b2725780> begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b2726ad0>, <ast.Constant object at 0x7da1b2725330>, <ast.Constant object at 0x7da1b2725270>, <ast.Constant object at 0x7da1b2727820>, <ast.Constant object at 0x7da1b2726200>, <ast.Constant object at 0x7da1b2727070>, <ast.Constant object at 0x7da1b2727a90>, <ast.Constant object at 0x7da1b2725810>, <ast.Constant object at 0x7da1b27257e0>, <ast.Constant object at 0x7da1b27276a0>, <ast.Constant object at 0x7da1b2724940>, <ast.Constant object at 0x7da1b2726500>, <ast.Constant object at 0x7da1b2727b20>, <ast.Constant object at 0x7da1b2727550>, <ast.Constant object at 0x7da1b2725a20>, <ast.Constant object at 0x7da1b27275b0>, <ast.Constant object at 0x7da1b2726170>, <ast.Constant object at 0x7da1b27261d0>, <ast.Constant object at 0x7da1b27263b0>, <ast.Constant object at 0x7da1b2725000>, <ast.Constant object at 0x7da1b2727910>, <ast.Constant object at 0x7da1b2724a90>, <ast.Constant object at 0x7da1b2726920>]]] begin[:]
<ast.Try object at 0x7da1b2727a60>
return[name[result]] | keyword[def] identifier[__compress_attributes] ( identifier[self] , identifier[dic] ):
literal[string]
identifier[result] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dic] . identifier[iteritems] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[types] . identifier[ListType] ) keyword[and] identifier[len] ( identifier[v] )== literal[int] :
keyword[if] identifier[k] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[try] :
identifier[result] [ identifier[k] ]= identifier[v] [ literal[int] ]. identifier[decode] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( literal[string] %( identifier[k] , identifier[e] ))
identifier[result] [ identifier[k] ]= identifier[v] [ literal[int] ]
keyword[return] identifier[result] | def __compress_attributes(self, dic):
"""
This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always return lists, even when it doesn
t make sense.
:param dic:
:return:
"""
result = {}
for (k, v) in dic.iteritems():
if isinstance(v, types.ListType) and len(v) == 1:
if k not in ('msExchMailboxSecurityDescriptor', 'msExchSafeSendersHash', 'msExchBlockedSendersHash', 'replicationSignature', 'msExchSafeRecipientsHash', 'sIDHistory', 'msRTCSIP-UserRoutingGroupId', 'mSMQDigests', 'mSMQSignCertificates', 'msExchMasterAccountSid', 'msExchPreviousAccountSid', 'msExchUMPinChecksum', 'userSMIMECertificate', 'userCertificate', 'userCert', 'msExchDisabledArchiveGUID', 'msExchUMPinChecksum', 'msExchUMSpokenName', 'objectSid', 'objectGUID', 'msExchArchiveGUID', 'thumbnailPhoto', 'msExchMailboxGuid'):
try:
result[k] = v[0].decode('utf-8') # depends on [control=['try'], data=[]]
except Exception as e:
logging.error('Failed to decode attribute: %s -- %s' % (k, e))
result[k] = v[0] # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['k']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return result |
def validate_sqlite_attr_name(name):
"""
:param str name: Name to validate.
:raises pathvalidate.NullNameError: If the ``name`` is empty.
:raises pathvalidate.InvalidCharError:
If the ``name`` includes unprintable character(s).
:raises pathvalidate.InvalidReservedNameError:
|raises_sqlite_keywords|
And invalid as an attribute name.
:raises pathvalidate.ValidReservedNameError:
|raises_sqlite_keywords|
However, valid as an attribute name.
"""
if not name:
raise NullNameError("null name")
if __RE_INVALID_CHARS.search(name):
raise InvalidCharError("unprintable character found")
name = name.upper()
if name in __SQLITE_INVALID_RESERVED_KEYWORDS_ATTR:
raise InvalidReservedNameError("'{}' is a reserved keyword by sqlite".format(name))
if name in __SQLITE_VALID_RESERVED_KEYWORDS_ATTR:
raise ValidReservedNameError("'{}' is a reserved keyword by sqlite".format(name)) | def function[validate_sqlite_attr_name, parameter[name]]:
constant[
:param str name: Name to validate.
:raises pathvalidate.NullNameError: If the ``name`` is empty.
:raises pathvalidate.InvalidCharError:
If the ``name`` includes unprintable character(s).
:raises pathvalidate.InvalidReservedNameError:
|raises_sqlite_keywords|
And invalid as an attribute name.
:raises pathvalidate.ValidReservedNameError:
|raises_sqlite_keywords|
However, valid as an attribute name.
]
if <ast.UnaryOp object at 0x7da1b04ed810> begin[:]
<ast.Raise object at 0x7da1b04d3460>
if call[name[__RE_INVALID_CHARS].search, parameter[name[name]]] begin[:]
<ast.Raise object at 0x7da1b04d1cf0>
variable[name] assign[=] call[name[name].upper, parameter[]]
if compare[name[name] in name[__SQLITE_INVALID_RESERVED_KEYWORDS_ATTR]] begin[:]
<ast.Raise object at 0x7da1b04d1840>
if compare[name[name] in name[__SQLITE_VALID_RESERVED_KEYWORDS_ATTR]] begin[:]
<ast.Raise object at 0x7da1b04d0790> | keyword[def] identifier[validate_sqlite_attr_name] ( identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[name] :
keyword[raise] identifier[NullNameError] ( literal[string] )
keyword[if] identifier[__RE_INVALID_CHARS] . identifier[search] ( identifier[name] ):
keyword[raise] identifier[InvalidCharError] ( literal[string] )
identifier[name] = identifier[name] . identifier[upper] ()
keyword[if] identifier[name] keyword[in] identifier[__SQLITE_INVALID_RESERVED_KEYWORDS_ATTR] :
keyword[raise] identifier[InvalidReservedNameError] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[if] identifier[name] keyword[in] identifier[__SQLITE_VALID_RESERVED_KEYWORDS_ATTR] :
keyword[raise] identifier[ValidReservedNameError] ( literal[string] . identifier[format] ( identifier[name] )) | def validate_sqlite_attr_name(name):
"""
:param str name: Name to validate.
:raises pathvalidate.NullNameError: If the ``name`` is empty.
:raises pathvalidate.InvalidCharError:
If the ``name`` includes unprintable character(s).
:raises pathvalidate.InvalidReservedNameError:
|raises_sqlite_keywords|
And invalid as an attribute name.
:raises pathvalidate.ValidReservedNameError:
|raises_sqlite_keywords|
However, valid as an attribute name.
"""
if not name:
raise NullNameError('null name') # depends on [control=['if'], data=[]]
if __RE_INVALID_CHARS.search(name):
raise InvalidCharError('unprintable character found') # depends on [control=['if'], data=[]]
name = name.upper()
if name in __SQLITE_INVALID_RESERVED_KEYWORDS_ATTR:
raise InvalidReservedNameError("'{}' is a reserved keyword by sqlite".format(name)) # depends on [control=['if'], data=['name']]
if name in __SQLITE_VALID_RESERVED_KEYWORDS_ATTR:
raise ValidReservedNameError("'{}' is a reserved keyword by sqlite".format(name)) # depends on [control=['if'], data=['name']] |
def _parse(self):
    """! @brief SWO parser as generator function coroutine.

    The generator yields every time it needs a byte of SWO data. The caller must use the
    generator's send() method to provide the next byte.

    Packet classes are dispatched on the header byte: sync (0x00 run),
    overflow (0x70), protocol packets (local/global timestamp, extension),
    and source packets (ITM instrumentation and hardware/DWT events). Each
    decoded packet is reported through self._send_event().
    """
    # Running timestamp accumulated from local timestamp packet deltas.
    timestamp = 0
    # NOTE(review): 'invalid' is set on malformed/reserved packets but never
    # read within this method — confirm whether it should be reported upstream.
    invalid = False
    while True:
        byte = yield
        hdr = byte  # First byte of every packet is its header.
        # Sync packet.
        if hdr == 0:
            packets = 0
            while True:
                # Check for final 1 bit after at least 5 all-zero sync packets
                if (packets >= 5) and (byte == 0x80):
                    break
                elif byte == 0:
                    packets += 1
                else:
                    # Get early non-zero packet, reset sync packet counter.
                    #packets = 0
                    invalid = True
                    break
                byte = yield
            # Synchronization resets the selected ITM stimulus page.
            self._itm_page = 0
        # Overflow packet.
        elif hdr == 0x70:
            self._send_event(events.TraceOverflow(timestamp))
        # Protocol packet.
        elif (hdr & 0x3) == 0:
            c = (hdr >> 7) & 0x1  # Continuation bit.
            d = (hdr >> 4) & 0b111  # Discriminator used to tell timestamp formats apart.
            # Local timestamp.
            if (hdr & 0xf) == 0 and d not in (0x0, 0x3):
                ts = 0
                tc = 0
                # Local timestamp packet format 1: continuation bytes each
                # contribute 7 low bits to the timestamp delta.
                if c == 1:
                    tc = (hdr >> 4) & 0x3
                    while c == 1:
                        byte = yield
                        ts = (ts << 7) | (byte & 0x7f)
                        c = (byte >> 7) & 0x1
                # Local timestamp packet format 2: 3-bit delta in the header itself.
                else:
                    ts = (hdr >> 4) & 0x7
                timestamp += ts
                self._send_event(events.TraceTimestamp(tc, timestamp))
            # Global timestamp.
            elif hdr in (0b10010100, 0b10110100):
                t = (hdr >> 5) & 0x1
                # TODO handle global timestamp
            # Extension.
            elif (hdr & 0x8) == 0x8:
                sh = (hdr >> 2) & 0x1
                if c == 0:
                    ex = (hdr >> 4) & 0x7
                else:
                    ex = 0
                # Continuation bytes each contribute 7 bits to the extension value.
                while c == 1:
                    byte = yield
                    ex = (ex << 7) | (byte & 0x7f)
                    c = (byte >> 7) & 0x1
                if sh == 0:
                    # Extension packet with sh==0 sets ITM stimulus page.
                    self._itm_page = ex
                else:
                    #self._send_event(events.TraceEvent("Extension: SH={:d} EX={:#x}\n".format(sh, ex), timestamp))
                    invalid = True
            # Reserved packet.
            else:
                invalid = True
        # Source packet.
        else:
            ss = hdr & 0x3  # Payload size code; nonzero in this branch by construction.
            l = 1 << (ss - 1)  # Payload length in bytes: 1, 2, or 4.
            a = (hdr >> 3) & 0x1f  # Port number / hardware event discriminator.
            # Collect the little-endian payload one byte at a time.
            if l == 1:
                payload = yield
            elif l == 2:
                byte1 = yield
                byte2 = yield
                payload = (byte1 |
                           (byte2 << 8))
            else:
                byte1 = yield
                byte2 = yield
                byte3 = yield
                byte4 = yield
                payload = (byte1 |
                           (byte2 << 8) |
                           (byte3 << 16) |
                           (byte4 << 24))
            # Instrumentation packet.
            if (hdr & 0x4) == 0:
                # The extension-selected page widens the 5-bit port field.
                port = (self._itm_page * 32) + a
                self._send_event(events.TraceITMEvent(port, payload, l, timestamp))
            # Hardware source packets...
            # Event counter
            elif a == 0:
                self._send_event(events.TraceEventCounter(payload, timestamp))
            # Exception trace
            elif a == 1:
                exceptionNumber = payload & 0x1ff
                exceptionName = self._core.exception_number_to_name(exceptionNumber, True)
                fn = (payload >> 12) & 0x3  # Transition function; only 1-3 are valid below.
                if 1 <= fn <= 3:
                    self._send_event(events.TraceExceptionEvent(exceptionNumber, exceptionName, fn, timestamp))
                else:
                    invalid = True
            # Periodic PC
            elif a == 2:
                # A payload of 0 indicates a period PC sleep event.
                self._send_event(events.TracePeriodicPC(payload, timestamp))
            # Data trace
            elif 8 <= a <= 23:
                # NOTE(review): 'type' shadows the builtin; left as-is here.
                type = (hdr >> 6) & 0x3
                cmpn = (hdr >> 4) & 0x3  # Comparator number.
                bit3 = (hdr >> 3) & 0x1
                # PC value
                if type == 0b01 and bit3 == 0:
                    self._send_event(events.TraceDataTraceEvent(cmpn=cmpn, pc=payload, ts=timestamp))
                # Address
                elif type == 0b01 and bit3 == 1:
                    self._send_event(events.TraceDataTraceEvent(cmpn=cmpn, addr=payload, ts=timestamp))
                # Data value
                elif type == 0b10:
                    self._send_event(events.TraceDataTraceEvent(cmpn=cmpn, value=payload, rnw=(bit3 == 0), sz=l, ts=timestamp))
                else:
                    invalid = True
            # Invalid DWT 'a' value.
            else:
                invalid = True
constant[! @brief SWO parser as generator function coroutine.
The generator yields every time it needs a byte of SWO data. The caller must use the
generator's send() method to provide the next byte.
]
variable[timestamp] assign[=] constant[0]
variable[invalid] assign[=] constant[False]
while constant[True] begin[:]
variable[byte] assign[=] <ast.Yield object at 0x7da1b19cfca0>
variable[hdr] assign[=] name[byte]
if compare[name[hdr] equal[==] constant[0]] begin[:]
variable[packets] assign[=] constant[0]
while constant[True] begin[:]
if <ast.BoolOp object at 0x7da1b19cf9d0> begin[:]
break
variable[byte] assign[=] <ast.Yield object at 0x7da1b19cf580>
name[self]._itm_page assign[=] constant[0] | keyword[def] identifier[_parse] ( identifier[self] ):
literal[string]
identifier[timestamp] = literal[int]
identifier[invalid] = keyword[False]
keyword[while] keyword[True] :
identifier[byte] = keyword[yield]
identifier[hdr] = identifier[byte]
keyword[if] identifier[hdr] == literal[int] :
identifier[packets] = literal[int]
keyword[while] keyword[True] :
keyword[if] ( identifier[packets] >= literal[int] ) keyword[and] ( identifier[byte] == literal[int] ):
keyword[break]
keyword[elif] identifier[byte] == literal[int] :
identifier[packets] += literal[int]
keyword[else] :
identifier[invalid] = keyword[True]
keyword[break]
identifier[byte] = keyword[yield]
identifier[self] . identifier[_itm_page] = literal[int]
keyword[elif] identifier[hdr] == literal[int] :
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceOverflow] ( identifier[timestamp] ))
keyword[elif] ( identifier[hdr] & literal[int] )== literal[int] :
identifier[c] =( identifier[hdr] >> literal[int] )& literal[int]
identifier[d] =( identifier[hdr] >> literal[int] )& literal[int]
keyword[if] ( identifier[hdr] & literal[int] )== literal[int] keyword[and] identifier[d] keyword[not] keyword[in] ( literal[int] , literal[int] ):
identifier[ts] = literal[int]
identifier[tc] = literal[int]
keyword[if] identifier[c] == literal[int] :
identifier[tc] =( identifier[hdr] >> literal[int] )& literal[int]
keyword[while] identifier[c] == literal[int] :
identifier[byte] = keyword[yield]
identifier[ts] =( identifier[ts] << literal[int] )|( identifier[byte] & literal[int] )
identifier[c] =( identifier[byte] >> literal[int] )& literal[int]
keyword[else] :
identifier[ts] =( identifier[hdr] >> literal[int] )& literal[int]
identifier[timestamp] += identifier[ts]
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceTimestamp] ( identifier[tc] , identifier[timestamp] ))
keyword[elif] identifier[hdr] keyword[in] ( literal[int] , literal[int] ):
identifier[t] =( identifier[hdr] >> literal[int] )& literal[int]
keyword[elif] ( identifier[hdr] & literal[int] )== literal[int] :
identifier[sh] =( identifier[hdr] >> literal[int] )& literal[int]
keyword[if] identifier[c] == literal[int] :
identifier[ex] =( identifier[hdr] >> literal[int] )& literal[int]
keyword[else] :
identifier[ex] = literal[int]
keyword[while] identifier[c] == literal[int] :
identifier[byte] = keyword[yield]
identifier[ex] =( identifier[ex] << literal[int] )|( identifier[byte] & literal[int] )
identifier[c] =( identifier[byte] >> literal[int] )& literal[int]
keyword[if] identifier[sh] == literal[int] :
identifier[self] . identifier[_itm_page] = identifier[ex]
keyword[else] :
identifier[invalid] = keyword[True]
keyword[else] :
identifier[invalid] = keyword[True]
keyword[else] :
identifier[ss] = identifier[hdr] & literal[int]
identifier[l] = literal[int] <<( identifier[ss] - literal[int] )
identifier[a] =( identifier[hdr] >> literal[int] )& literal[int]
keyword[if] identifier[l] == literal[int] :
identifier[payload] = keyword[yield]
keyword[elif] identifier[l] == literal[int] :
identifier[byte1] = keyword[yield]
identifier[byte2] = keyword[yield]
identifier[payload] =( identifier[byte1] |
( identifier[byte2] << literal[int] ))
keyword[else] :
identifier[byte1] = keyword[yield]
identifier[byte2] = keyword[yield]
identifier[byte3] = keyword[yield]
identifier[byte4] = keyword[yield]
identifier[payload] =( identifier[byte1] |
( identifier[byte2] << literal[int] )|
( identifier[byte3] << literal[int] )|
( identifier[byte4] << literal[int] ))
keyword[if] ( identifier[hdr] & literal[int] )== literal[int] :
identifier[port] =( identifier[self] . identifier[_itm_page] * literal[int] )+ identifier[a]
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceITMEvent] ( identifier[port] , identifier[payload] , identifier[l] , identifier[timestamp] ))
keyword[elif] identifier[a] == literal[int] :
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceEventCounter] ( identifier[payload] , identifier[timestamp] ))
keyword[elif] identifier[a] == literal[int] :
identifier[exceptionNumber] = identifier[payload] & literal[int]
identifier[exceptionName] = identifier[self] . identifier[_core] . identifier[exception_number_to_name] ( identifier[exceptionNumber] , keyword[True] )
identifier[fn] =( identifier[payload] >> literal[int] )& literal[int]
keyword[if] literal[int] <= identifier[fn] <= literal[int] :
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceExceptionEvent] ( identifier[exceptionNumber] , identifier[exceptionName] , identifier[fn] , identifier[timestamp] ))
keyword[else] :
identifier[invalid] = keyword[True]
keyword[elif] identifier[a] == literal[int] :
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TracePeriodicPC] ( identifier[payload] , identifier[timestamp] ))
keyword[elif] literal[int] <= identifier[a] <= literal[int] :
identifier[type] =( identifier[hdr] >> literal[int] )& literal[int]
identifier[cmpn] =( identifier[hdr] >> literal[int] )& literal[int]
identifier[bit3] =( identifier[hdr] >> literal[int] )& literal[int]
keyword[if] identifier[type] == literal[int] keyword[and] identifier[bit3] == literal[int] :
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceDataTraceEvent] ( identifier[cmpn] = identifier[cmpn] , identifier[pc] = identifier[payload] , identifier[ts] = identifier[timestamp] ))
keyword[elif] identifier[type] == literal[int] keyword[and] identifier[bit3] == literal[int] :
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceDataTraceEvent] ( identifier[cmpn] = identifier[cmpn] , identifier[addr] = identifier[payload] , identifier[ts] = identifier[timestamp] ))
keyword[elif] identifier[type] == literal[int] :
identifier[self] . identifier[_send_event] ( identifier[events] . identifier[TraceDataTraceEvent] ( identifier[cmpn] = identifier[cmpn] , identifier[value] = identifier[payload] , identifier[rnw] =( identifier[bit3] == literal[int] ), identifier[sz] = identifier[l] , identifier[ts] = identifier[timestamp] ))
keyword[else] :
identifier[invalid] = keyword[True]
keyword[else] :
identifier[invalid] = keyword[True] | def _parse(self):
"""! @brief SWO parser as generator function coroutine.
The generator yields every time it needs a byte of SWO data. The caller must use the
generator's send() method to provide the next byte.
"""
timestamp = 0
invalid = False
while True:
byte = (yield)
hdr = byte
# Sync packet.
if hdr == 0:
packets = 0
while True:
# Check for final 1 bit after at least 5 all-zero sync packets
if packets >= 5 and byte == 128:
break # depends on [control=['if'], data=[]]
elif byte == 0:
packets += 1 # depends on [control=['if'], data=[]]
else:
# Get early non-zero packet, reset sync packet counter.
#packets = 0
invalid = True
break
byte = (yield) # depends on [control=['while'], data=[]]
self._itm_page = 0 # depends on [control=['if'], data=[]]
# Overflow packet.
elif hdr == 112:
self._send_event(events.TraceOverflow(timestamp)) # depends on [control=['if'], data=[]]
# Protocol packet.
elif hdr & 3 == 0:
c = hdr >> 7 & 1
d = hdr >> 4 & 7
# Local timestamp.
if hdr & 15 == 0 and d not in (0, 3):
ts = 0
tc = 0
# Local timestamp packet format 1.
if c == 1:
tc = hdr >> 4 & 3
while c == 1:
byte = (yield)
ts = ts << 7 | byte & 127
c = byte >> 7 & 1 # depends on [control=['while'], data=['c']] # depends on [control=['if'], data=['c']]
else:
# Local timestamp packet format 2.
ts = hdr >> 4 & 7
timestamp += ts
self._send_event(events.TraceTimestamp(tc, timestamp)) # depends on [control=['if'], data=[]]
# Global timestamp.
elif hdr in (148, 180):
t = hdr >> 5 & 1 # depends on [control=['if'], data=['hdr']]
# TODO handle global timestamp
# Extension.
elif hdr & 8 == 8:
sh = hdr >> 2 & 1
if c == 0:
ex = hdr >> 4 & 7 # depends on [control=['if'], data=[]]
else:
ex = 0
while c == 1:
byte = (yield)
ex = ex << 7 | byte & 127
c = byte >> 7 & 1 # depends on [control=['while'], data=['c']]
if sh == 0:
# Extension packet with sh==0 sets ITM stimulus page.
self._itm_page = ex # depends on [control=['if'], data=[]]
else:
#self._send_event(events.TraceEvent("Extension: SH={:d} EX={:#x}\n".format(sh, ex), timestamp))
invalid = True # depends on [control=['if'], data=[]]
else:
# Reserved packet.
invalid = True # depends on [control=['if'], data=[]]
else:
# Source packet.
ss = hdr & 3
l = 1 << ss - 1
a = hdr >> 3 & 31
if l == 1:
payload = (yield) # depends on [control=['if'], data=[]]
elif l == 2:
byte1 = (yield)
byte2 = (yield)
payload = byte1 | byte2 << 8 # depends on [control=['if'], data=[]]
else:
byte1 = (yield)
byte2 = (yield)
byte3 = (yield)
byte4 = (yield)
payload = byte1 | byte2 << 8 | byte3 << 16 | byte4 << 24
# Instrumentation packet.
if hdr & 4 == 0:
port = self._itm_page * 32 + a
self._send_event(events.TraceITMEvent(port, payload, l, timestamp)) # depends on [control=['if'], data=[]]
# Hardware source packets...
# Event counter
elif a == 0:
self._send_event(events.TraceEventCounter(payload, timestamp)) # depends on [control=['if'], data=[]]
# Exception trace
elif a == 1:
exceptionNumber = payload & 511
exceptionName = self._core.exception_number_to_name(exceptionNumber, True)
fn = payload >> 12 & 3
if 1 <= fn <= 3:
self._send_event(events.TraceExceptionEvent(exceptionNumber, exceptionName, fn, timestamp)) # depends on [control=['if'], data=['fn']]
else:
invalid = True # depends on [control=['if'], data=[]]
# Periodic PC
elif a == 2:
# A payload of 0 indicates a period PC sleep event.
self._send_event(events.TracePeriodicPC(payload, timestamp)) # depends on [control=['if'], data=[]]
# Data trace
elif 8 <= a <= 23:
type = hdr >> 6 & 3
cmpn = hdr >> 4 & 3
bit3 = hdr >> 3 & 1
# PC value
if type == 1 and bit3 == 0:
self._send_event(events.TraceDataTraceEvent(cmpn=cmpn, pc=payload, ts=timestamp)) # depends on [control=['if'], data=[]]
# Address
elif type == 1 and bit3 == 1:
self._send_event(events.TraceDataTraceEvent(cmpn=cmpn, addr=payload, ts=timestamp)) # depends on [control=['if'], data=[]]
# Data value
elif type == 2:
self._send_event(events.TraceDataTraceEvent(cmpn=cmpn, value=payload, rnw=bit3 == 0, sz=l, ts=timestamp)) # depends on [control=['if'], data=[]]
else:
invalid = True # depends on [control=['if'], data=[]]
else:
# Invalid DWT 'a' value.
invalid = True # depends on [control=['while'], data=[]] |
def send_messages(self, topic, partition, *msg):
    """Send one or more pre-encoded message payloads to a partition.

    Each payload *must* already be encoded to bytes by the caller; passing
    unicode will not work — encode first, e.g. via
    ``unicode_message.encode('utf-8')``. Every message is produced with a
    ``None`` key.

    Arguments:
        topic (str): name of topic for produce request
        partition (int): partition number for produce request
        *msg (bytes): one or more message payloads

    Returns:
        ResponseRequest returned by server

    Raises:
        FailedPayloadsError: low-level connection error, caused by
            networking failures or a malformed request.
        KafkaUnavailableError: all known brokers are down when attempting
            to refresh metadata.
        LeaderNotAvailableError: topic or partition is initializing or
            a broker failed and leadership election is in progress.
        NotLeaderForPartitionError: metadata is out of sync; the broker
            the request was sent to is not the leader for the topic or
            partition.
        UnknownTopicOrPartitionError: the topic or partition has not
            been created yet and auto-creation is not available.
        AsyncProducerQueueFull: in async mode, if too many messages are
            unsent and remain in the internal queue.
    """
    # All public produce paths funnel through the shared internal helper.
    return self._send_messages(topic, partition, *msg)
constant[Helper method to send produce requests.
Note that msg type *must* be encoded to bytes by user. Passing unicode
message will not work, for example you should encode before calling
send_messages via something like `unicode_message.encode('utf-8')`
All messages will set the message 'key' to None.
Arguments:
topic (str): name of topic for produce request
partition (int): partition number for produce request
*msg (bytes): one or more message payloads
Returns:
ResponseRequest returned by server
Raises:
FailedPayloadsError: low-level connection error, can be caused by
networking failures, or a malformed request.
KafkaUnavailableError: all known brokers are down when attempting
to refresh metadata.
LeaderNotAvailableError: topic or partition is initializing or
a broker failed and leadership election is in progress.
NotLeaderForPartitionError: metadata is out of sync; the broker
that the request was sent to is not the leader for the topic
or partition.
UnknownTopicOrPartitionError: the topic or partition has not
been created yet and auto-creation is not available.
AsyncProducerQueueFull: in async mode, if too many messages are
unsent and remain in the internal queue.
]
return[call[name[self]._send_messages, parameter[name[topic], name[partition], <ast.Starred object at 0x7da1b2178790>]]] | keyword[def] identifier[send_messages] ( identifier[self] , identifier[topic] , identifier[partition] ,* identifier[msg] ):
literal[string]
keyword[return] identifier[self] . identifier[_send_messages] ( identifier[topic] , identifier[partition] ,* identifier[msg] ) | def send_messages(self, topic, partition, *msg):
"""Helper method to send produce requests.
Note that msg type *must* be encoded to bytes by user. Passing unicode
message will not work, for example you should encode before calling
send_messages via something like `unicode_message.encode('utf-8')`
All messages will set the message 'key' to None.
Arguments:
topic (str): name of topic for produce request
partition (int): partition number for produce request
*msg (bytes): one or more message payloads
Returns:
ResponseRequest returned by server
Raises:
FailedPayloadsError: low-level connection error, can be caused by
networking failures, or a malformed request.
KafkaUnavailableError: all known brokers are down when attempting
to refresh metadata.
LeaderNotAvailableError: topic or partition is initializing or
a broker failed and leadership election is in progress.
NotLeaderForPartitionError: metadata is out of sync; the broker
that the request was sent to is not the leader for the topic
or partition.
UnknownTopicOrPartitionError: the topic or partition has not
been created yet and auto-creation is not available.
AsyncProducerQueueFull: in async mode, if too many messages are
unsent and remain in the internal queue.
"""
return self._send_messages(topic, partition, *msg) |
def parse_filter_params(query_params, filterable):
    """
    Parse query_params to a filter params dict. Merge multiple values for one key to a list.
    Filter out keys that aren't filterable.

    :param query_params: query params (multi-dict exposing a ``mixed()`` method),
        or None
    :param filterable: collection of filterable keys
    :return: dict of filter values (empty when ``query_params`` is None)
    """
    if query_params is None:
        return {}
    # Build the merged view once. The previous version called mixed() on
    # every loop iteration *and* for each value lookup, rebuilding the
    # merged dict each time (accidentally O(n^2)).
    mixed = query_params.mixed()
    return {key: value for key, value in mixed.items() if key in filterable}
constant[
Parse query_params to a filter params dict. Merge multiple values for one key to a list.
Filter out keys that aren't filterable.
:param query_params: query params
:param filterable: list of filterable keys
:return: dict of filter values
]
if compare[name[query_params] is_not constant[None]] begin[:]
variable[filter_params] assign[=] dictionary[[], []]
for taget[name[fq]] in starred[call[name[query_params].mixed, parameter[]]] begin[:]
if compare[name[fq] in name[filterable]] begin[:]
call[name[filter_params]][name[fq]] assign[=] call[call[name[query_params].mixed, parameter[]].get, parameter[name[fq]]]
return[name[filter_params]] | keyword[def] identifier[parse_filter_params] ( identifier[query_params] , identifier[filterable] ):
literal[string]
keyword[if] identifier[query_params] keyword[is] keyword[not] keyword[None] :
identifier[filter_params] ={}
keyword[for] identifier[fq] keyword[in] identifier[query_params] . identifier[mixed] ():
keyword[if] identifier[fq] keyword[in] identifier[filterable] :
identifier[filter_params] [ identifier[fq] ]= identifier[query_params] . identifier[mixed] (). identifier[get] ( identifier[fq] )
keyword[return] identifier[filter_params]
keyword[else] :
keyword[return] {} | def parse_filter_params(query_params, filterable):
"""
Parse query_params to a filter params dict. Merge multiple values for one key to a list.
Filter out keys that aren't filterable.
:param query_params: query params
:param filterable: list of filterable keys
:return: dict of filter values
"""
if query_params is not None:
filter_params = {}
for fq in query_params.mixed():
if fq in filterable:
filter_params[fq] = query_params.mixed().get(fq) # depends on [control=['if'], data=['fq']] # depends on [control=['for'], data=['fq']]
return filter_params # depends on [control=['if'], data=['query_params']]
else:
return {} |
def _serialize(self, entity, pb, prefix='', parent_repeated=False,
               projection=None):
    """Internal helper to serialize this property into a protocol buffer.

    Subclasses may override this method.

    Args:
      entity: The Model (subclass) instance being serialized.
      pb: The EntityProto protocol buffer that properties are appended to.
      prefix: Optional name prefix used for StructuredProperty
        (if present, must end in '.').
      parent_repeated: True if the parent (or an earlier ancestor)
        is a repeated Property.
      projection: A list or tuple of strings representing the projection
        for the model instance, or None if the instance is not a
        projection.
    """
    base_values = self._get_base_value_unwrapped_as_list(entity)
    prop_name = prefix + self._name
    # Nothing to do when the property is excluded from the projection.
    if projection and prop_name not in projection:
        return
    # Indexed properties land in the property list; others in raw_property.
    if self._indexed:
        new_property = pb.add_property
    else:
        new_property = pb.add_raw_property
    if self._repeated and not base_values and self._write_empty_list:
        # Explicitly record the empty list.
        prop = new_property()
        prop.set_name(prop_name)
        prop.set_multiple(False)
        prop.set_meaning(entity_pb.Property.EMPTY_LIST)
        prop.mutable_value()
        return
    # Write one property per value (a single iteration when not repeated).
    for base_value in base_values:
        prop = new_property()
        prop.set_name(prop_name)
        prop.set_multiple(self._repeated or parent_repeated)
        value_pb = prop.mutable_value()
        if base_value is None:
            continue
        self._db_set_value(value_pb, prop, base_value)
        if projection:
            # Projected properties have the INDEX_VALUE meaning and only
            # contain the original property's name and value.
            projected = entity_pb.Property()
            projected.set_name(prop.name())
            projected.set_meaning(entity_pb.Property.INDEX_VALUE)
            projected.set_multiple(False)
            projected.mutable_value().CopyFrom(value_pb)
            prop.CopyFrom(projected)
constant[Internal helper to serialize this property to a protocol buffer.
Subclasses may override this method.
Args:
entity: The entity, a Model (subclass) instance.
pb: The protocol buffer, an EntityProto instance.
prefix: Optional name prefix used for StructuredProperty
(if present, must end in '.').
parent_repeated: True if the parent (or an earlier ancestor)
is a repeated Property.
projection: A list or tuple of strings representing the projection for
the model instance, or None if the instance is not a projection.
]
variable[values] assign[=] call[name[self]._get_base_value_unwrapped_as_list, parameter[name[entity]]]
variable[name] assign[=] binary_operation[name[prefix] + name[self]._name]
if <ast.BoolOp object at 0x7da1b11ef9a0> begin[:]
return[None]
if name[self]._indexed begin[:]
variable[create_prop] assign[=] <ast.Lambda object at 0x7da1b11ee560>
if <ast.BoolOp object at 0x7da1b11ece80> begin[:]
variable[p] assign[=] call[name[create_prop], parameter[]]
call[name[p].set_name, parameter[name[name]]]
call[name[p].set_multiple, parameter[constant[False]]]
call[name[p].set_meaning, parameter[name[entity_pb].Property.EMPTY_LIST]]
call[name[p].mutable_value, parameter[]] | keyword[def] identifier[_serialize] ( identifier[self] , identifier[entity] , identifier[pb] , identifier[prefix] = literal[string] , identifier[parent_repeated] = keyword[False] ,
identifier[projection] = keyword[None] ):
literal[string]
identifier[values] = identifier[self] . identifier[_get_base_value_unwrapped_as_list] ( identifier[entity] )
identifier[name] = identifier[prefix] + identifier[self] . identifier[_name]
keyword[if] identifier[projection] keyword[and] identifier[name] keyword[not] keyword[in] identifier[projection] :
keyword[return]
keyword[if] identifier[self] . identifier[_indexed] :
identifier[create_prop] = keyword[lambda] : identifier[pb] . identifier[add_property] ()
keyword[else] :
identifier[create_prop] = keyword[lambda] : identifier[pb] . identifier[add_raw_property] ()
keyword[if] identifier[self] . identifier[_repeated] keyword[and] keyword[not] identifier[values] keyword[and] identifier[self] . identifier[_write_empty_list] :
identifier[p] = identifier[create_prop] ()
identifier[p] . identifier[set_name] ( identifier[name] )
identifier[p] . identifier[set_multiple] ( keyword[False] )
identifier[p] . identifier[set_meaning] ( identifier[entity_pb] . identifier[Property] . identifier[EMPTY_LIST] )
identifier[p] . identifier[mutable_value] ()
keyword[else] :
keyword[for] identifier[val] keyword[in] identifier[values] :
identifier[p] = identifier[create_prop] ()
identifier[p] . identifier[set_name] ( identifier[name] )
identifier[p] . identifier[set_multiple] ( identifier[self] . identifier[_repeated] keyword[or] identifier[parent_repeated] )
identifier[v] = identifier[p] . identifier[mutable_value] ()
keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_db_set_value] ( identifier[v] , identifier[p] , identifier[val] )
keyword[if] identifier[projection] :
identifier[new_p] = identifier[entity_pb] . identifier[Property] ()
identifier[new_p] . identifier[set_name] ( identifier[p] . identifier[name] ())
identifier[new_p] . identifier[set_meaning] ( identifier[entity_pb] . identifier[Property] . identifier[INDEX_VALUE] )
identifier[new_p] . identifier[set_multiple] ( keyword[False] )
identifier[new_p] . identifier[mutable_value] (). identifier[CopyFrom] ( identifier[v] )
identifier[p] . identifier[CopyFrom] ( identifier[new_p] ) | def _serialize(self, entity, pb, prefix='', parent_repeated=False, projection=None):
"""Internal helper to serialize this property to a protocol buffer.
Subclasses may override this method.
Args:
entity: The entity, a Model (subclass) instance.
pb: The protocol buffer, an EntityProto instance.
prefix: Optional name prefix used for StructuredProperty
(if present, must end in '.').
parent_repeated: True if the parent (or an earlier ancestor)
is a repeated Property.
projection: A list or tuple of strings representing the projection for
the model instance, or None if the instance is not a projection.
"""
values = self._get_base_value_unwrapped_as_list(entity)
name = prefix + self._name
if projection and name not in projection:
return # depends on [control=['if'], data=[]]
if self._indexed:
create_prop = lambda : pb.add_property() # depends on [control=['if'], data=[]]
else:
create_prop = lambda : pb.add_raw_property()
if self._repeated and (not values) and self._write_empty_list:
# We want to write the empty list
p = create_prop()
p.set_name(name)
p.set_multiple(False)
p.set_meaning(entity_pb.Property.EMPTY_LIST)
p.mutable_value() # depends on [control=['if'], data=[]]
else:
# We write a list, or a single property
for val in values:
p = create_prop()
p.set_name(name)
p.set_multiple(self._repeated or parent_repeated)
v = p.mutable_value()
if val is not None:
self._db_set_value(v, p, val)
if projection:
# Projected properties have the INDEX_VALUE meaning and only contain
# the original property's name and value.
new_p = entity_pb.Property()
new_p.set_name(p.name())
new_p.set_meaning(entity_pb.Property.INDEX_VALUE)
new_p.set_multiple(False)
new_p.mutable_value().CopyFrom(v)
p.CopyFrom(new_p) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['val']] # depends on [control=['for'], data=['val']] |
def timestamp_to_ms(groups):
    """
    Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.

    Example:
        >>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
        420
    """
    hours, minutes, seconds, fraction = map(int, groups)
    # Scale the fractional field by its digit count: ".4" -> 400 ms,
    # ".42" -> 420 ms, ".420" -> 420 ms.
    frac_ms = fraction * 10 ** (3 - len(groups[-1]))
    return frac_ms + 1000 * (seconds + 60 * (minutes + 60 * hours))
constant[
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
]
<ast.Tuple object at 0x7da20c991e70> assign[=] call[name[map], parameter[name[int], name[groups]]]
variable[ms] assign[=] binary_operation[name[frac] * binary_operation[constant[10] ** binary_operation[constant[3] - call[name[len], parameter[call[name[groups]][<ast.UnaryOp object at 0x7da20c6aa950>]]]]]]
<ast.AugAssign object at 0x7da20c6aa2c0>
<ast.AugAssign object at 0x7da20c6aa590>
<ast.AugAssign object at 0x7da20c6a9210>
return[name[ms]] | keyword[def] identifier[timestamp_to_ms] ( identifier[groups] ):
literal[string]
identifier[h] , identifier[m] , identifier[s] , identifier[frac] = identifier[map] ( identifier[int] , identifier[groups] )
identifier[ms] = identifier[frac] * literal[int] **( literal[int] - identifier[len] ( identifier[groups] [- literal[int] ]))
identifier[ms] += identifier[s] * literal[int]
identifier[ms] += identifier[m] * literal[int]
identifier[ms] += identifier[h] * literal[int]
keyword[return] identifier[ms] | def timestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds.
Example:
>>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups())
420
"""
(h, m, s, frac) = map(int, groups)
ms = frac * 10 ** (3 - len(groups[-1]))
ms += s * 1000
ms += m * 60000
ms += h * 3600000
return ms |
def get_solution(self, parameters=None):
    """stub"""
    # No solution at all is a hard error; anything else falls back to the
    # plain solution text on failure.
    if not self.has_solution():
        raise IllegalState()
    try:
        script = self.get_text('python_script')
        if not script:
            return self.get_text('solution').text
        effective_params = parameters or self.get_parameters()
        return self._get_parameterized_text(effective_params)
    except Exception:
        # Best-effort: any failure in the parameterized path degrades to
        # the static solution text.
        return self.get_text('solution').text
constant[stub]
if <ast.UnaryOp object at 0x7da18dc05900> begin[:]
<ast.Raise object at 0x7da18dc066e0>
<ast.Try object at 0x7da18dc05ae0> | keyword[def] identifier[get_solution] ( identifier[self] , identifier[parameters] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_solution] ():
keyword[raise] identifier[IllegalState] ()
keyword[try] :
keyword[if] keyword[not] identifier[self] . identifier[get_text] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_text] ( literal[string] ). identifier[text]
keyword[if] keyword[not] identifier[parameters] :
identifier[parameters] = identifier[self] . identifier[get_parameters] ()
keyword[return] identifier[self] . identifier[_get_parameterized_text] ( identifier[parameters] )
keyword[except] identifier[Exception] :
keyword[return] identifier[self] . identifier[get_text] ( literal[string] ). identifier[text] | def get_solution(self, parameters=None):
"""stub"""
if not self.has_solution():
raise IllegalState() # depends on [control=['if'], data=[]]
try:
if not self.get_text('python_script'):
return self.get_text('solution').text # depends on [control=['if'], data=[]]
if not parameters:
parameters = self.get_parameters() # depends on [control=['if'], data=[]]
return self._get_parameterized_text(parameters) # depends on [control=['try'], data=[]]
except Exception:
return self.get_text('solution').text # depends on [control=['except'], data=[]] |
def get_relationships_by_genus_type(self, relationship_genus_type):
"""Gets a ``RelationshipList`` corresponding to the given relationship genus ``Type`` which does not include relationships of types derived from the specified ``Type``.
arg: relationship_genus_type (osid.type.Type): a relationship
genus type
return: (osid.relationship.RelationshipList) - the returned
``Relationship list``
raise: NullArgument - ``relationship_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('relationship',
collection='Relationship',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(relationship_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy) | def function[get_relationships_by_genus_type, parameter[self, relationship_genus_type]]:
constant[Gets a ``RelationshipList`` corresponding to the given relationship genus ``Type`` which does not include relationships of types derived from the specified ``Type``.
arg: relationship_genus_type (osid.type.Type): a relationship
genus type
return: (osid.relationship.RelationshipList) - the returned
``Relationship list``
raise: NullArgument - ``relationship_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[relationship]]]
variable[result] assign[=] call[call[name[collection].find, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da18f8104c0>], [<ast.Call object at 0x7da18f8131f0>]]]]]].sort, parameter[constant[_id], name[DESCENDING]]]
return[call[name[objects].RelationshipList, parameter[name[result]]]] | keyword[def] identifier[get_relationships_by_genus_type] ( identifier[self] , identifier[relationship_genus_type] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
identifier[result] = identifier[collection] . identifier[find] (
identifier[dict] ({ literal[string] : identifier[str] ( identifier[relationship_genus_type] )},
** identifier[self] . identifier[_view_filter] ())). identifier[sort] ( literal[string] , identifier[DESCENDING] )
keyword[return] identifier[objects] . identifier[RelationshipList] ( identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] ) | def get_relationships_by_genus_type(self, relationship_genus_type):
"""Gets a ``RelationshipList`` corresponding to the given relationship genus ``Type`` which does not include relationships of types derived from the specified ``Type``.
arg: relationship_genus_type (osid.type.Type): a relationship
genus type
return: (osid.relationship.RelationshipList) - the returned
``Relationship list``
raise: NullArgument - ``relationship_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('relationship', collection='Relationship', runtime=self._runtime)
result = collection.find(dict({'genusTypeId': str(relationship_genus_type)}, **self._view_filter())).sort('_id', DESCENDING)
return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy) |
def from_directory(cls, directory):
"""Create a security object from a directory.
Relies on standard names for each file (``skein.crt`` and
``skein.pem``)."""
cert_path = os.path.join(directory, 'skein.crt')
key_path = os.path.join(directory, 'skein.pem')
for path, name in [(cert_path, 'cert'), (key_path, 'key')]:
if not os.path.exists(path):
raise context.FileNotFoundError(
"Security %s file not found at %r" % (name, path)
)
return Security(cert_file=cert_path, key_file=key_path) | def function[from_directory, parameter[cls, directory]]:
constant[Create a security object from a directory.
Relies on standard names for each file (``skein.crt`` and
``skein.pem``).]
variable[cert_path] assign[=] call[name[os].path.join, parameter[name[directory], constant[skein.crt]]]
variable[key_path] assign[=] call[name[os].path.join, parameter[name[directory], constant[skein.pem]]]
for taget[tuple[[<ast.Name object at 0x7da1b08a6830>, <ast.Name object at 0x7da1b08a66b0>]]] in starred[list[[<ast.Tuple object at 0x7da1b08a66e0>, <ast.Tuple object at 0x7da1b08a6140>]]] begin[:]
if <ast.UnaryOp object at 0x7da1b08a5f90> begin[:]
<ast.Raise object at 0x7da1b0721f60>
return[call[name[Security], parameter[]]] | keyword[def] identifier[from_directory] ( identifier[cls] , identifier[directory] ):
literal[string]
identifier[cert_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] )
identifier[key_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] )
keyword[for] identifier[path] , identifier[name] keyword[in] [( identifier[cert_path] , literal[string] ),( identifier[key_path] , literal[string] )]:
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[raise] identifier[context] . identifier[FileNotFoundError] (
literal[string] %( identifier[name] , identifier[path] )
)
keyword[return] identifier[Security] ( identifier[cert_file] = identifier[cert_path] , identifier[key_file] = identifier[key_path] ) | def from_directory(cls, directory):
"""Create a security object from a directory.
Relies on standard names for each file (``skein.crt`` and
``skein.pem``)."""
cert_path = os.path.join(directory, 'skein.crt')
key_path = os.path.join(directory, 'skein.pem')
for (path, name) in [(cert_path, 'cert'), (key_path, 'key')]:
if not os.path.exists(path):
raise context.FileNotFoundError('Security %s file not found at %r' % (name, path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return Security(cert_file=cert_path, key_file=key_path) |
def load_encoder(self, name:str, device:torch.device=None):
"Load the encoder `name` from the model directory."
encoder = get_model(self.model)[0]
if device is None: device = self.data.device
if hasattr(encoder, 'module'): encoder = encoder.module
encoder.load_state_dict(torch.load(self.path/self.model_dir/f'{name}.pth'))
encoder.load_state_dict(torch.load(self.path/self.model_dir/f'{name}.pth', map_location=device))
self.freeze() | def function[load_encoder, parameter[self, name, device]]:
constant[Load the encoder `name` from the model directory.]
variable[encoder] assign[=] call[call[name[get_model], parameter[name[self].model]]][constant[0]]
if compare[name[device] is constant[None]] begin[:]
variable[device] assign[=] name[self].data.device
if call[name[hasattr], parameter[name[encoder], constant[module]]] begin[:]
variable[encoder] assign[=] name[encoder].module
call[name[encoder].load_state_dict, parameter[call[name[torch].load, parameter[binary_operation[binary_operation[name[self].path / name[self].model_dir] / <ast.JoinedStr object at 0x7da1b1eeb940>]]]]]
call[name[encoder].load_state_dict, parameter[call[name[torch].load, parameter[binary_operation[binary_operation[name[self].path / name[self].model_dir] / <ast.JoinedStr object at 0x7da1b1eea020>]]]]]
call[name[self].freeze, parameter[]] | keyword[def] identifier[load_encoder] ( identifier[self] , identifier[name] : identifier[str] , identifier[device] : identifier[torch] . identifier[device] = keyword[None] ):
literal[string]
identifier[encoder] = identifier[get_model] ( identifier[self] . identifier[model] )[ literal[int] ]
keyword[if] identifier[device] keyword[is] keyword[None] : identifier[device] = identifier[self] . identifier[data] . identifier[device]
keyword[if] identifier[hasattr] ( identifier[encoder] , literal[string] ): identifier[encoder] = identifier[encoder] . identifier[module]
identifier[encoder] . identifier[load_state_dict] ( identifier[torch] . identifier[load] ( identifier[self] . identifier[path] / identifier[self] . identifier[model_dir] / literal[string] ))
identifier[encoder] . identifier[load_state_dict] ( identifier[torch] . identifier[load] ( identifier[self] . identifier[path] / identifier[self] . identifier[model_dir] / literal[string] , identifier[map_location] = identifier[device] ))
identifier[self] . identifier[freeze] () | def load_encoder(self, name: str, device: torch.device=None):
"""Load the encoder `name` from the model directory."""
encoder = get_model(self.model)[0]
if device is None:
device = self.data.device # depends on [control=['if'], data=['device']]
if hasattr(encoder, 'module'):
encoder = encoder.module # depends on [control=['if'], data=[]]
encoder.load_state_dict(torch.load(self.path / self.model_dir / f'{name}.pth'))
encoder.load_state_dict(torch.load(self.path / self.model_dir / f'{name}.pth', map_location=device))
self.freeze() |
def first(self, cascadeFetch=False):
'''
First - Returns the oldest record (lowerst primary key) with current filters.
This makes an efficient queue, as it only fetches a single object.
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@return - Instance of Model object, or None if no items match current filters
'''
obj = None
matchedKeys = self.getPrimaryKeys(sortByAge=True)
if matchedKeys:
# Loop so we don't return None when there are items, if item is deleted between getting key and getting obj
while matchedKeys and obj is None:
obj = self.get(matchedKeys.pop(0), cascadeFetch=cascadeFetch)
return obj | def function[first, parameter[self, cascadeFetch]]:
constant[
First - Returns the oldest record (lowerst primary key) with current filters.
This makes an efficient queue, as it only fetches a single object.
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@return - Instance of Model object, or None if no items match current filters
]
variable[obj] assign[=] constant[None]
variable[matchedKeys] assign[=] call[name[self].getPrimaryKeys, parameter[]]
if name[matchedKeys] begin[:]
while <ast.BoolOp object at 0x7da1afefa500> begin[:]
variable[obj] assign[=] call[name[self].get, parameter[call[name[matchedKeys].pop, parameter[constant[0]]]]]
return[name[obj]] | keyword[def] identifier[first] ( identifier[self] , identifier[cascadeFetch] = keyword[False] ):
literal[string]
identifier[obj] = keyword[None]
identifier[matchedKeys] = identifier[self] . identifier[getPrimaryKeys] ( identifier[sortByAge] = keyword[True] )
keyword[if] identifier[matchedKeys] :
keyword[while] identifier[matchedKeys] keyword[and] identifier[obj] keyword[is] keyword[None] :
identifier[obj] = identifier[self] . identifier[get] ( identifier[matchedKeys] . identifier[pop] ( literal[int] ), identifier[cascadeFetch] = identifier[cascadeFetch] )
keyword[return] identifier[obj] | def first(self, cascadeFetch=False):
"""
First - Returns the oldest record (lowerst primary key) with current filters.
This makes an efficient queue, as it only fetches a single object.
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@return - Instance of Model object, or None if no items match current filters
"""
obj = None
matchedKeys = self.getPrimaryKeys(sortByAge=True)
if matchedKeys: # Loop so we don't return None when there are items, if item is deleted between getting key and getting obj
while matchedKeys and obj is None:
obj = self.get(matchedKeys.pop(0), cascadeFetch=cascadeFetch) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
return obj |
def flatten_iterable(iterable):
"""flatten iterable, but leaves out strings
[[[1, 2, 3], [4, 5]], 6] -> [1, 2, 3, 4, 5, 6]
"""
for item in iterable:
if isinstance(item, collections.Iterable) and not isinstance(item, basestring):
for sub in flatten_iterable(item):
yield sub
else:
yield item | def function[flatten_iterable, parameter[iterable]]:
constant[flatten iterable, but leaves out strings
[[[1, 2, 3], [4, 5]], 6] -> [1, 2, 3, 4, 5, 6]
]
for taget[name[item]] in starred[name[iterable]] begin[:]
if <ast.BoolOp object at 0x7da1b11a44f0> begin[:]
for taget[name[sub]] in starred[call[name[flatten_iterable], parameter[name[item]]]] begin[:]
<ast.Yield object at 0x7da1b11e3d90> | keyword[def] identifier[flatten_iterable] ( identifier[iterable] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[iterable] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[collections] . identifier[Iterable] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[item] , identifier[basestring] ):
keyword[for] identifier[sub] keyword[in] identifier[flatten_iterable] ( identifier[item] ):
keyword[yield] identifier[sub]
keyword[else] :
keyword[yield] identifier[item] | def flatten_iterable(iterable):
"""flatten iterable, but leaves out strings
[[[1, 2, 3], [4, 5]], 6] -> [1, 2, 3, 4, 5, 6]
"""
for item in iterable:
if isinstance(item, collections.Iterable) and (not isinstance(item, basestring)):
for sub in flatten_iterable(item):
yield sub # depends on [control=['for'], data=['sub']] # depends on [control=['if'], data=[]]
else:
yield item # depends on [control=['for'], data=['item']] |
def assert_none_of_selectors(self, selector, *locators, **kwargs):
"""
Asserts that none of the provided selectors are present on the given page or descendants of
the current node. If options are provided, the assertion will check that each locator is
present with those options as well (other than ``wait``). ::
page.assert_none_of_selectors("custom", "Tom", "Joe", visible="all")
page.assert_none_of_selectors("css", "#my_div", "a.not_clicked")
It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``.
The ``wait`` option applies to all of the selectors as a group, so none of the locators must
be present with ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds.
If the given selector is not a valid selector, the first argument is assumed to be a locator
and the default selector will be used.
Args:
selector (str, optional): The name of the selector to use. Defaults to
:data:`capybara.default_selector`.
*locators (str): Variable length list of locators.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
"""
wait = kwargs['wait'] if 'wait' in kwargs else capybara.default_max_wait_time
if not isinstance(selector, Hashable) or selector not in selectors:
locators = (selector,) + locators
selector = capybara.default_selector
@self.synchronize(wait=wait)
def assert_none_of_selectors():
for locator in locators:
self.assert_no_selector(selector, locator, **kwargs)
return True
return assert_none_of_selectors() | def function[assert_none_of_selectors, parameter[self, selector]]:
constant[
Asserts that none of the provided selectors are present on the given page or descendants of
the current node. If options are provided, the assertion will check that each locator is
present with those options as well (other than ``wait``). ::
page.assert_none_of_selectors("custom", "Tom", "Joe", visible="all")
page.assert_none_of_selectors("css", "#my_div", "a.not_clicked")
It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``.
The ``wait`` option applies to all of the selectors as a group, so none of the locators must
be present with ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds.
If the given selector is not a valid selector, the first argument is assumed to be a locator
and the default selector will be used.
Args:
selector (str, optional): The name of the selector to use. Defaults to
:data:`capybara.default_selector`.
*locators (str): Variable length list of locators.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
]
variable[wait] assign[=] <ast.IfExp object at 0x7da1b0216a40>
if <ast.BoolOp object at 0x7da1b02140a0> begin[:]
variable[locators] assign[=] binary_operation[tuple[[<ast.Name object at 0x7da1b0216e90>]] + name[locators]]
variable[selector] assign[=] name[capybara].default_selector
def function[assert_none_of_selectors, parameter[]]:
for taget[name[locator]] in starred[name[locators]] begin[:]
call[name[self].assert_no_selector, parameter[name[selector], name[locator]]]
return[constant[True]]
return[call[name[assert_none_of_selectors], parameter[]]] | keyword[def] identifier[assert_none_of_selectors] ( identifier[self] , identifier[selector] ,* identifier[locators] ,** identifier[kwargs] ):
literal[string]
identifier[wait] = identifier[kwargs] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[else] identifier[capybara] . identifier[default_max_wait_time]
keyword[if] keyword[not] identifier[isinstance] ( identifier[selector] , identifier[Hashable] ) keyword[or] identifier[selector] keyword[not] keyword[in] identifier[selectors] :
identifier[locators] =( identifier[selector] ,)+ identifier[locators]
identifier[selector] = identifier[capybara] . identifier[default_selector]
@ identifier[self] . identifier[synchronize] ( identifier[wait] = identifier[wait] )
keyword[def] identifier[assert_none_of_selectors] ():
keyword[for] identifier[locator] keyword[in] identifier[locators] :
identifier[self] . identifier[assert_no_selector] ( identifier[selector] , identifier[locator] ,** identifier[kwargs] )
keyword[return] keyword[True]
keyword[return] identifier[assert_none_of_selectors] () | def assert_none_of_selectors(self, selector, *locators, **kwargs):
"""
Asserts that none of the provided selectors are present on the given page or descendants of
the current node. If options are provided, the assertion will check that each locator is
present with those options as well (other than ``wait``). ::
page.assert_none_of_selectors("custom", "Tom", "Joe", visible="all")
page.assert_none_of_selectors("css", "#my_div", "a.not_clicked")
It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``.
The ``wait`` option applies to all of the selectors as a group, so none of the locators must
be present with ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds.
If the given selector is not a valid selector, the first argument is assumed to be a locator
and the default selector will be used.
Args:
selector (str, optional): The name of the selector to use. Defaults to
:data:`capybara.default_selector`.
*locators (str): Variable length list of locators.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
"""
wait = kwargs['wait'] if 'wait' in kwargs else capybara.default_max_wait_time
if not isinstance(selector, Hashable) or selector not in selectors:
locators = (selector,) + locators
selector = capybara.default_selector # depends on [control=['if'], data=[]]
@self.synchronize(wait=wait)
def assert_none_of_selectors():
for locator in locators:
self.assert_no_selector(selector, locator, **kwargs) # depends on [control=['for'], data=['locator']]
return True
return assert_none_of_selectors() |
def _make_flex_doc(op_name, typ):
"""
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {series, 'dataframe']}
Returns
-------
doc : str
"""
op_name = op_name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' ' + typ
else:
equiv = typ + ' ' + op_desc['op'] + ' other'
if typ == 'series':
base_doc = _flex_doc_SERIES
doc_no_examples = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
if op_desc['series_examples']:
doc = doc_no_examples + op_desc['series_examples']
else:
doc = doc_no_examples
elif typ == 'dataframe':
base_doc = _flex_doc_FRAME
doc = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
elif typ == 'panel':
base_doc = _flex_doc_PANEL
doc = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
else:
raise AssertionError('Invalid typ argument.')
return doc | def function[_make_flex_doc, parameter[op_name, typ]]:
constant[
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {series, 'dataframe']}
Returns
-------
doc : str
]
variable[op_name] assign[=] call[name[op_name].replace, parameter[constant[__], constant[]]]
variable[op_desc] assign[=] call[name[_op_descriptions]][name[op_name]]
if call[name[op_desc]][constant[reversed]] begin[:]
variable[equiv] assign[=] binary_operation[binary_operation[binary_operation[constant[other ] + call[name[op_desc]][constant[op]]] + constant[ ]] + name[typ]]
if compare[name[typ] equal[==] constant[series]] begin[:]
variable[base_doc] assign[=] name[_flex_doc_SERIES]
variable[doc_no_examples] assign[=] call[name[base_doc].format, parameter[]]
if call[name[op_desc]][constant[series_examples]] begin[:]
variable[doc] assign[=] binary_operation[name[doc_no_examples] + call[name[op_desc]][constant[series_examples]]]
return[name[doc]] | keyword[def] identifier[_make_flex_doc] ( identifier[op_name] , identifier[typ] ):
literal[string]
identifier[op_name] = identifier[op_name] . identifier[replace] ( literal[string] , literal[string] )
identifier[op_desc] = identifier[_op_descriptions] [ identifier[op_name] ]
keyword[if] identifier[op_desc] [ literal[string] ]:
identifier[equiv] = literal[string] + identifier[op_desc] [ literal[string] ]+ literal[string] + identifier[typ]
keyword[else] :
identifier[equiv] = identifier[typ] + literal[string] + identifier[op_desc] [ literal[string] ]+ literal[string]
keyword[if] identifier[typ] == literal[string] :
identifier[base_doc] = identifier[_flex_doc_SERIES]
identifier[doc_no_examples] = identifier[base_doc] . identifier[format] (
identifier[desc] = identifier[op_desc] [ literal[string] ],
identifier[op_name] = identifier[op_name] ,
identifier[equiv] = identifier[equiv] ,
identifier[reverse] = identifier[op_desc] [ literal[string] ]
)
keyword[if] identifier[op_desc] [ literal[string] ]:
identifier[doc] = identifier[doc_no_examples] + identifier[op_desc] [ literal[string] ]
keyword[else] :
identifier[doc] = identifier[doc_no_examples]
keyword[elif] identifier[typ] == literal[string] :
identifier[base_doc] = identifier[_flex_doc_FRAME]
identifier[doc] = identifier[base_doc] . identifier[format] (
identifier[desc] = identifier[op_desc] [ literal[string] ],
identifier[op_name] = identifier[op_name] ,
identifier[equiv] = identifier[equiv] ,
identifier[reverse] = identifier[op_desc] [ literal[string] ]
)
keyword[elif] identifier[typ] == literal[string] :
identifier[base_doc] = identifier[_flex_doc_PANEL]
identifier[doc] = identifier[base_doc] . identifier[format] (
identifier[desc] = identifier[op_desc] [ literal[string] ],
identifier[op_name] = identifier[op_name] ,
identifier[equiv] = identifier[equiv] ,
identifier[reverse] = identifier[op_desc] [ literal[string] ]
)
keyword[else] :
keyword[raise] identifier[AssertionError] ( literal[string] )
keyword[return] identifier[doc] | def _make_flex_doc(op_name, typ):
"""
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {series, 'dataframe']}
Returns
-------
doc : str
"""
op_name = op_name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' ' + typ # depends on [control=['if'], data=[]]
else:
equiv = typ + ' ' + op_desc['op'] + ' other'
if typ == 'series':
base_doc = _flex_doc_SERIES
doc_no_examples = base_doc.format(desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'])
if op_desc['series_examples']:
doc = doc_no_examples + op_desc['series_examples'] # depends on [control=['if'], data=[]]
else:
doc = doc_no_examples # depends on [control=['if'], data=[]]
elif typ == 'dataframe':
base_doc = _flex_doc_FRAME
doc = base_doc.format(desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse']) # depends on [control=['if'], data=[]]
elif typ == 'panel':
base_doc = _flex_doc_PANEL
doc = base_doc.format(desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse']) # depends on [control=['if'], data=[]]
else:
raise AssertionError('Invalid typ argument.')
return doc |
def parse_account_key():
"""Parse account key to get public key"""
LOGGER.info("Parsing account key...")
cmd = [
'openssl', 'rsa',
'-in', os.path.join(gettempdir(), 'account.key'),
'-noout',
'-text'
]
devnull = open(os.devnull, 'wb')
return subprocess.check_output(cmd, stderr=devnull) | def function[parse_account_key, parameter[]]:
constant[Parse account key to get public key]
call[name[LOGGER].info, parameter[constant[Parsing account key...]]]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b1f639a0>, <ast.Constant object at 0x7da1b1f60a30>, <ast.Constant object at 0x7da1b1f61570>, <ast.Call object at 0x7da1b1f63370>, <ast.Constant object at 0x7da1b1f61360>, <ast.Constant object at 0x7da1b1f63bb0>]]
variable[devnull] assign[=] call[name[open], parameter[name[os].devnull, constant[wb]]]
return[call[name[subprocess].check_output, parameter[name[cmd]]]] | keyword[def] identifier[parse_account_key] ():
literal[string]
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[cmd] =[
literal[string] , literal[string] ,
literal[string] , identifier[os] . identifier[path] . identifier[join] ( identifier[gettempdir] (), literal[string] ),
literal[string] ,
literal[string]
]
identifier[devnull] = identifier[open] ( identifier[os] . identifier[devnull] , literal[string] )
keyword[return] identifier[subprocess] . identifier[check_output] ( identifier[cmd] , identifier[stderr] = identifier[devnull] ) | def parse_account_key():
"""Parse account key to get public key"""
LOGGER.info('Parsing account key...')
cmd = ['openssl', 'rsa', '-in', os.path.join(gettempdir(), 'account.key'), '-noout', '-text']
devnull = open(os.devnull, 'wb')
return subprocess.check_output(cmd, stderr=devnull) |
def getallstates(self, window_name, object_name):
"""
Get all states of given object
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: list of string on success.
@rtype: list
"""
object_handle = self._get_object_handle(window_name, object_name)
_obj_states = []
if object_handle.AXEnabled:
_obj_states.append("enabled")
if object_handle.AXFocused:
_obj_states.append("focused")
else:
try:
if object_handle.AXFocused:
_obj_states.append("focusable")
except:
pass
if re.match("AXCheckBox", object_handle.AXRole, re.M | re.U | re.L) or \
re.match("AXRadioButton", object_handle.AXRole,
re.M | re.U | re.L):
if object_handle.AXValue:
_obj_states.append("checked")
return _obj_states | def function[getallstates, parameter[self, window_name, object_name]]:
constant[
Get all states of given object
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: list of string on success.
@rtype: list
]
variable[object_handle] assign[=] call[name[self]._get_object_handle, parameter[name[window_name], name[object_name]]]
variable[_obj_states] assign[=] list[[]]
if name[object_handle].AXEnabled begin[:]
call[name[_obj_states].append, parameter[constant[enabled]]]
if name[object_handle].AXFocused begin[:]
call[name[_obj_states].append, parameter[constant[focused]]]
if <ast.BoolOp object at 0x7da1b00f6d70> begin[:]
if name[object_handle].AXValue begin[:]
call[name[_obj_states].append, parameter[constant[checked]]]
return[name[_obj_states]] | keyword[def] identifier[getallstates] ( identifier[self] , identifier[window_name] , identifier[object_name] ):
literal[string]
identifier[object_handle] = identifier[self] . identifier[_get_object_handle] ( identifier[window_name] , identifier[object_name] )
identifier[_obj_states] =[]
keyword[if] identifier[object_handle] . identifier[AXEnabled] :
identifier[_obj_states] . identifier[append] ( literal[string] )
keyword[if] identifier[object_handle] . identifier[AXFocused] :
identifier[_obj_states] . identifier[append] ( literal[string] )
keyword[else] :
keyword[try] :
keyword[if] identifier[object_handle] . identifier[AXFocused] :
identifier[_obj_states] . identifier[append] ( literal[string] )
keyword[except] :
keyword[pass]
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[object_handle] . identifier[AXRole] , identifier[re] . identifier[M] | identifier[re] . identifier[U] | identifier[re] . identifier[L] ) keyword[or] identifier[re] . identifier[match] ( literal[string] , identifier[object_handle] . identifier[AXRole] ,
identifier[re] . identifier[M] | identifier[re] . identifier[U] | identifier[re] . identifier[L] ):
keyword[if] identifier[object_handle] . identifier[AXValue] :
identifier[_obj_states] . identifier[append] ( literal[string] )
keyword[return] identifier[_obj_states] | def getallstates(self, window_name, object_name):
"""
Get all states of given object
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: list of string on success.
@rtype: list
"""
object_handle = self._get_object_handle(window_name, object_name)
_obj_states = []
if object_handle.AXEnabled:
_obj_states.append('enabled') # depends on [control=['if'], data=[]]
if object_handle.AXFocused:
_obj_states.append('focused') # depends on [control=['if'], data=[]]
else:
try:
if object_handle.AXFocused:
_obj_states.append('focusable') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
if re.match('AXCheckBox', object_handle.AXRole, re.M | re.U | re.L) or re.match('AXRadioButton', object_handle.AXRole, re.M | re.U | re.L):
if object_handle.AXValue:
_obj_states.append('checked') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return _obj_states |
def from_xyxy_array(cls, xyxy, shape):
"""
Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N,4) ndarray
Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.BoundingBoxesOnImage
Object containing a list of BoundingBox objects following the provided corner coordinates.
"""
ia.do_assert(xyxy.shape[1] == 4, "Expected input array of shape (N, 4), got shape %s." % (xyxy.shape,))
boxes = [BoundingBox(*row) for row in xyxy]
return cls(boxes, shape) | def function[from_xyxy_array, parameter[cls, xyxy, shape]]:
constant[
Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N,4) ndarray
Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.BoundingBoxesOnImage
Object containing a list of BoundingBox objects following the provided corner coordinates.
]
call[name[ia].do_assert, parameter[compare[call[name[xyxy].shape][constant[1]] equal[==] constant[4]], binary_operation[constant[Expected input array of shape (N, 4), got shape %s.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18c4cc1f0>]]]]]
variable[boxes] assign[=] <ast.ListComp object at 0x7da18c4cd8a0>
return[call[name[cls], parameter[name[boxes], name[shape]]]] | keyword[def] identifier[from_xyxy_array] ( identifier[cls] , identifier[xyxy] , identifier[shape] ):
literal[string]
identifier[ia] . identifier[do_assert] ( identifier[xyxy] . identifier[shape] [ literal[int] ]== literal[int] , literal[string] %( identifier[xyxy] . identifier[shape] ,))
identifier[boxes] =[ identifier[BoundingBox] (* identifier[row] ) keyword[for] identifier[row] keyword[in] identifier[xyxy] ]
keyword[return] identifier[cls] ( identifier[boxes] , identifier[shape] ) | def from_xyxy_array(cls, xyxy, shape):
"""
Convert an (N,4) ndarray to a BoundingBoxesOnImage object.
This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N,4) ndarray
Array containing the corner coordinates (top-left, bottom-right) of ``N`` bounding boxes
in the form ``(x1, y1, x2, y2)``. Should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.BoundingBoxesOnImage
Object containing a list of BoundingBox objects following the provided corner coordinates.
"""
ia.do_assert(xyxy.shape[1] == 4, 'Expected input array of shape (N, 4), got shape %s.' % (xyxy.shape,))
boxes = [BoundingBox(*row) for row in xyxy]
return cls(boxes, shape) |
def _get_section_name(cls, parser):
"""Parse options from relevant section."""
for section_name in cls.POSSIBLE_SECTION_NAMES:
if parser.has_section(section_name):
return section_name
return None | def function[_get_section_name, parameter[cls, parser]]:
constant[Parse options from relevant section.]
for taget[name[section_name]] in starred[name[cls].POSSIBLE_SECTION_NAMES] begin[:]
if call[name[parser].has_section, parameter[name[section_name]]] begin[:]
return[name[section_name]]
return[constant[None]] | keyword[def] identifier[_get_section_name] ( identifier[cls] , identifier[parser] ):
literal[string]
keyword[for] identifier[section_name] keyword[in] identifier[cls] . identifier[POSSIBLE_SECTION_NAMES] :
keyword[if] identifier[parser] . identifier[has_section] ( identifier[section_name] ):
keyword[return] identifier[section_name]
keyword[return] keyword[None] | def _get_section_name(cls, parser):
"""Parse options from relevant section."""
for section_name in cls.POSSIBLE_SECTION_NAMES:
if parser.has_section(section_name):
return section_name # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['section_name']]
return None |
async def top_tracks(self, country: str = 'US') -> List[Track]:
"""Get Spotify catalog information about an artist’s top tracks by country.
Parameters
----------
country : str
The country to search for, it defaults to 'US'.
Returns
-------
tracks : List[Track]
The artists top tracks.
"""
from .track import Track
top = await self.__client.http.artist_top_tracks(self.id, country=country)
return list(Track(self.__client, item) for item in top['tracks']) | <ast.AsyncFunctionDef object at 0x7da20c6e4d60> | keyword[async] keyword[def] identifier[top_tracks] ( identifier[self] , identifier[country] : identifier[str] = literal[string] )-> identifier[List] [ identifier[Track] ]:
literal[string]
keyword[from] . identifier[track] keyword[import] identifier[Track]
identifier[top] = keyword[await] identifier[self] . identifier[__client] . identifier[http] . identifier[artist_top_tracks] ( identifier[self] . identifier[id] , identifier[country] = identifier[country] )
keyword[return] identifier[list] ( identifier[Track] ( identifier[self] . identifier[__client] , identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[top] [ literal[string] ]) | async def top_tracks(self, country: str='US') -> List[Track]:
"""Get Spotify catalog information about an artist’s top tracks by country.
Parameters
----------
country : str
The country to search for, it defaults to 'US'.
Returns
-------
tracks : List[Track]
The artists top tracks.
"""
from .track import Track
top = await self.__client.http.artist_top_tracks(self.id, country=country)
return list((Track(self.__client, item) for item in top['tracks'])) |
def UDiv(a: BitVec, b: BitVec) -> BitVec:
"""Create an unsigned division expression.
:param a:
:param b:
:return:
"""
return _arithmetic_helper(a, b, z3.UDiv) | def function[UDiv, parameter[a, b]]:
constant[Create an unsigned division expression.
:param a:
:param b:
:return:
]
return[call[name[_arithmetic_helper], parameter[name[a], name[b], name[z3].UDiv]]] | keyword[def] identifier[UDiv] ( identifier[a] : identifier[BitVec] , identifier[b] : identifier[BitVec] )-> identifier[BitVec] :
literal[string]
keyword[return] identifier[_arithmetic_helper] ( identifier[a] , identifier[b] , identifier[z3] . identifier[UDiv] ) | def UDiv(a: BitVec, b: BitVec) -> BitVec:
"""Create an unsigned division expression.
:param a:
:param b:
:return:
"""
return _arithmetic_helper(a, b, z3.UDiv) |
def convert(model, feature_names, target):
"""Convert a Support Vector Classtion (SVC) model to the protobuf spec.
Parameters
----------
model: SVC
A trained SVC encoder model.
feature_names: [str], optional (default=None)
Name of the input columns.
target: str, optional (default=None)
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
spec = _generate_base_svm_classifier_spec(model)
spec = set_classifier_interface_params(spec, feature_names, model.classes_, 'supportVectorClassifier', output_features = target)
svm = spec.supportVectorClassifier
for i in model.n_support_:
svm.numberOfSupportVectorsPerClass.append(int(i))
if len(model.probA_) != 0 and len(model.classes_) == 2:
print("[WARNING] Scikit Learn uses a technique to normalize pairwise probabilities even for binary classification. "
"This can cause differences in predicted probabilities, usually less than 0.5%.")
# If this is an empty list, then model.probA_ will be an empty list.
if len(model.probA_) != 0:
for i in model.probA_:
svm.probA.append(i)
for i in model.probB_:
svm.probB.append(i)
return _MLModel(spec) | def function[convert, parameter[model, feature_names, target]]:
constant[Convert a Support Vector Classtion (SVC) model to the protobuf spec.
Parameters
----------
model: SVC
A trained SVC encoder model.
feature_names: [str], optional (default=None)
Name of the input columns.
target: str, optional (default=None)
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
]
if <ast.UnaryOp object at 0x7da1b1f748e0> begin[:]
<ast.Raise object at 0x7da1b1f76260>
variable[spec] assign[=] call[name[_generate_base_svm_classifier_spec], parameter[name[model]]]
variable[spec] assign[=] call[name[set_classifier_interface_params], parameter[name[spec], name[feature_names], name[model].classes_, constant[supportVectorClassifier]]]
variable[svm] assign[=] name[spec].supportVectorClassifier
for taget[name[i]] in starred[name[model].n_support_] begin[:]
call[name[svm].numberOfSupportVectorsPerClass.append, parameter[call[name[int], parameter[name[i]]]]]
if <ast.BoolOp object at 0x7da1b1f76110> begin[:]
call[name[print], parameter[constant[[WARNING] Scikit Learn uses a technique to normalize pairwise probabilities even for binary classification. This can cause differences in predicted probabilities, usually less than 0.5%.]]]
if compare[call[name[len], parameter[name[model].probA_]] not_equal[!=] constant[0]] begin[:]
for taget[name[i]] in starred[name[model].probA_] begin[:]
call[name[svm].probA.append, parameter[name[i]]]
for taget[name[i]] in starred[name[model].probB_] begin[:]
call[name[svm].probB.append, parameter[name[i]]]
return[call[name[_MLModel], parameter[name[spec]]]] | keyword[def] identifier[convert] ( identifier[model] , identifier[feature_names] , identifier[target] ):
literal[string]
keyword[if] keyword[not] ( identifier[_HAS_SKLEARN] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[spec] = identifier[_generate_base_svm_classifier_spec] ( identifier[model] )
identifier[spec] = identifier[set_classifier_interface_params] ( identifier[spec] , identifier[feature_names] , identifier[model] . identifier[classes_] , literal[string] , identifier[output_features] = identifier[target] )
identifier[svm] = identifier[spec] . identifier[supportVectorClassifier]
keyword[for] identifier[i] keyword[in] identifier[model] . identifier[n_support_] :
identifier[svm] . identifier[numberOfSupportVectorsPerClass] . identifier[append] ( identifier[int] ( identifier[i] ))
keyword[if] identifier[len] ( identifier[model] . identifier[probA_] )!= literal[int] keyword[and] identifier[len] ( identifier[model] . identifier[classes_] )== literal[int] :
identifier[print] ( literal[string]
literal[string] )
keyword[if] identifier[len] ( identifier[model] . identifier[probA_] )!= literal[int] :
keyword[for] identifier[i] keyword[in] identifier[model] . identifier[probA_] :
identifier[svm] . identifier[probA] . identifier[append] ( identifier[i] )
keyword[for] identifier[i] keyword[in] identifier[model] . identifier[probB_] :
identifier[svm] . identifier[probB] . identifier[append] ( identifier[i] )
keyword[return] identifier[_MLModel] ( identifier[spec] ) | def convert(model, feature_names, target):
"""Convert a Support Vector Classtion (SVC) model to the protobuf spec.
Parameters
----------
model: SVC
A trained SVC encoder model.
feature_names: [str], optional (default=None)
Name of the input columns.
target: str, optional (default=None)
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not _HAS_SKLEARN:
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.') # depends on [control=['if'], data=[]]
spec = _generate_base_svm_classifier_spec(model)
spec = set_classifier_interface_params(spec, feature_names, model.classes_, 'supportVectorClassifier', output_features=target)
svm = spec.supportVectorClassifier
for i in model.n_support_:
svm.numberOfSupportVectorsPerClass.append(int(i)) # depends on [control=['for'], data=['i']]
if len(model.probA_) != 0 and len(model.classes_) == 2:
print('[WARNING] Scikit Learn uses a technique to normalize pairwise probabilities even for binary classification. This can cause differences in predicted probabilities, usually less than 0.5%.') # depends on [control=['if'], data=[]]
# If this is an empty list, then model.probA_ will be an empty list.
if len(model.probA_) != 0:
for i in model.probA_:
svm.probA.append(i) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
for i in model.probB_:
svm.probB.append(i) # depends on [control=['for'], data=['i']]
return _MLModel(spec) |
def get_thread_block_dimensions(params, block_size_names=None):
"""thread block size from tuning params, currently using convention"""
if not block_size_names:
block_size_names = default_block_size_names
block_size_x = params.get(block_size_names[0], 256)
block_size_y = params.get(block_size_names[1], 1)
block_size_z = params.get(block_size_names[2], 1)
return (int(block_size_x), int(block_size_y), int(block_size_z)) | def function[get_thread_block_dimensions, parameter[params, block_size_names]]:
constant[thread block size from tuning params, currently using convention]
if <ast.UnaryOp object at 0x7da1b0466830> begin[:]
variable[block_size_names] assign[=] name[default_block_size_names]
variable[block_size_x] assign[=] call[name[params].get, parameter[call[name[block_size_names]][constant[0]], constant[256]]]
variable[block_size_y] assign[=] call[name[params].get, parameter[call[name[block_size_names]][constant[1]], constant[1]]]
variable[block_size_z] assign[=] call[name[params].get, parameter[call[name[block_size_names]][constant[2]], constant[1]]]
return[tuple[[<ast.Call object at 0x7da1b0464820>, <ast.Call object at 0x7da1b0464520>, <ast.Call object at 0x7da1b0464760>]]] | keyword[def] identifier[get_thread_block_dimensions] ( identifier[params] , identifier[block_size_names] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[block_size_names] :
identifier[block_size_names] = identifier[default_block_size_names]
identifier[block_size_x] = identifier[params] . identifier[get] ( identifier[block_size_names] [ literal[int] ], literal[int] )
identifier[block_size_y] = identifier[params] . identifier[get] ( identifier[block_size_names] [ literal[int] ], literal[int] )
identifier[block_size_z] = identifier[params] . identifier[get] ( identifier[block_size_names] [ literal[int] ], literal[int] )
keyword[return] ( identifier[int] ( identifier[block_size_x] ), identifier[int] ( identifier[block_size_y] ), identifier[int] ( identifier[block_size_z] )) | def get_thread_block_dimensions(params, block_size_names=None):
"""thread block size from tuning params, currently using convention"""
if not block_size_names:
block_size_names = default_block_size_names # depends on [control=['if'], data=[]]
block_size_x = params.get(block_size_names[0], 256)
block_size_y = params.get(block_size_names[1], 1)
block_size_z = params.get(block_size_names[2], 1)
return (int(block_size_x), int(block_size_y), int(block_size_z)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.