code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def create_epochs(self, epoch_length=30, first_second=None):
"""Create epochs in annotation file.
Parameters
----------
epoch_length : int
duration in seconds of each epoch
first_second : int, optional
Time, in seconds from record start, at which the epochs begin
"""
lg.info('creating epochs of length ' + str(epoch_length))
if first_second is None:
first_second = self.first_second  # fall back to the annotation's own start time
last_sec = ceil((self.last_second - first_second) /
epoch_length) * epoch_length  # round the total span up to a whole number of epochs; NOTE(review): this is a duration, not an absolute end time -- confirm that dropping the first_second offset from the range stop is intended
stages = self.rater.find('stages')  # <stages> container element in the annotation XML
for epoch_beg in range(first_second, last_sec, epoch_length):  # one <epoch> element per epoch_length-second window
epoch = SubElement(stages, 'epoch')
start_time = SubElement(epoch, 'epoch_start')
start_time.text = str(epoch_beg)  # offsets are stored as text, in seconds
end_time = SubElement(epoch, 'epoch_end')
end_time.text = str(epoch_beg + epoch_length)
stage = SubElement(epoch, 'stage')
stage.text = 'Unknown'  # stage is unscored until a rater sets it
quality = SubElement(epoch, 'quality')
quality.text = 'Good' | def function[create_epochs, parameter[self, epoch_length, first_second]]:
constant[Create epochs in annotation file.
Parameters
----------
epoch_length : int
duration in seconds of each epoch
first_second : int, optional
Time, in seconds from record start, at which the epochs begin
]
call[name[lg].info, parameter[binary_operation[constant[creating epochs of length ] + call[name[str], parameter[name[epoch_length]]]]]]
if compare[name[first_second] is constant[None]] begin[:]
variable[first_second] assign[=] name[self].first_second
variable[last_sec] assign[=] binary_operation[call[name[ceil], parameter[binary_operation[binary_operation[name[self].last_second - name[first_second]] / name[epoch_length]]]] * name[epoch_length]]
variable[stages] assign[=] call[name[self].rater.find, parameter[constant[stages]]]
for taget[name[epoch_beg]] in starred[call[name[range], parameter[name[first_second], name[last_sec], name[epoch_length]]]] begin[:]
variable[epoch] assign[=] call[name[SubElement], parameter[name[stages], constant[epoch]]]
variable[start_time] assign[=] call[name[SubElement], parameter[name[epoch], constant[epoch_start]]]
name[start_time].text assign[=] call[name[str], parameter[name[epoch_beg]]]
variable[end_time] assign[=] call[name[SubElement], parameter[name[epoch], constant[epoch_end]]]
name[end_time].text assign[=] call[name[str], parameter[binary_operation[name[epoch_beg] + name[epoch_length]]]]
variable[stage] assign[=] call[name[SubElement], parameter[name[epoch], constant[stage]]]
name[stage].text assign[=] constant[Unknown]
variable[quality] assign[=] call[name[SubElement], parameter[name[epoch], constant[quality]]]
name[quality].text assign[=] constant[Good] | keyword[def] identifier[create_epochs] ( identifier[self] , identifier[epoch_length] = literal[int] , identifier[first_second] = keyword[None] ):
literal[string]
identifier[lg] . identifier[info] ( literal[string] + identifier[str] ( identifier[epoch_length] ))
keyword[if] identifier[first_second] keyword[is] keyword[None] :
identifier[first_second] = identifier[self] . identifier[first_second]
identifier[last_sec] = identifier[ceil] (( identifier[self] . identifier[last_second] - identifier[first_second] )/
identifier[epoch_length] )* identifier[epoch_length]
identifier[stages] = identifier[self] . identifier[rater] . identifier[find] ( literal[string] )
keyword[for] identifier[epoch_beg] keyword[in] identifier[range] ( identifier[first_second] , identifier[last_sec] , identifier[epoch_length] ):
identifier[epoch] = identifier[SubElement] ( identifier[stages] , literal[string] )
identifier[start_time] = identifier[SubElement] ( identifier[epoch] , literal[string] )
identifier[start_time] . identifier[text] = identifier[str] ( identifier[epoch_beg] )
identifier[end_time] = identifier[SubElement] ( identifier[epoch] , literal[string] )
identifier[end_time] . identifier[text] = identifier[str] ( identifier[epoch_beg] + identifier[epoch_length] )
identifier[stage] = identifier[SubElement] ( identifier[epoch] , literal[string] )
identifier[stage] . identifier[text] = literal[string]
identifier[quality] = identifier[SubElement] ( identifier[epoch] , literal[string] )
identifier[quality] . identifier[text] = literal[string] | def create_epochs(self, epoch_length=30, first_second=None):
"""Create epochs in annotation file.
Parameters
----------
epoch_length : int
duration in seconds of each epoch
first_second : int, optional
Time, in seconds from record start, at which the epochs begin
"""
lg.info('creating epochs of length ' + str(epoch_length))
if first_second is None:
first_second = self.first_second # depends on [control=['if'], data=['first_second']]
last_sec = ceil((self.last_second - first_second) / epoch_length) * epoch_length
stages = self.rater.find('stages')
for epoch_beg in range(first_second, last_sec, epoch_length):
epoch = SubElement(stages, 'epoch')
start_time = SubElement(epoch, 'epoch_start')
start_time.text = str(epoch_beg)
end_time = SubElement(epoch, 'epoch_end')
end_time.text = str(epoch_beg + epoch_length)
stage = SubElement(epoch, 'stage')
stage.text = 'Unknown'
quality = SubElement(epoch, 'quality')
quality.text = 'Good' # depends on [control=['for'], data=['epoch_beg']] |
def make_string_field_value(cls, field):
"""
String Field has three constraints (apart from anything
in the super class)
Args:
field (StringField): actual string field object from a
model declaration
Returns:
random string value
Raises:
NotImplementedError: if the field declares a regex constraint,
which this generator cannot satisfy
"""
if field.regex is not None:
raise NotImplementedError  # regex-constrained strings are not supported here
string_range = cls.get_range(field)  # length bounds derived from the field's constraints
return cls.get_random_string(string_range) | def function[make_string_field_value, parameter[cls, field]]:
constant[
String Field has three constraints (apart from anything
in the super class)
Args:
field (StringField): actual string field object from a
model declaration
Returns:
random string value
]
if compare[name[field].regex is_not constant[None]] begin[:]
<ast.Raise object at 0x7da20cabd840>
variable[string_range] assign[=] call[name[cls].get_range, parameter[name[field]]]
return[call[name[cls].get_random_string, parameter[name[string_range]]]] | keyword[def] identifier[make_string_field_value] ( identifier[cls] , identifier[field] ):
literal[string]
keyword[if] identifier[field] . identifier[regex] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[NotImplementedError]
identifier[string_range] = identifier[cls] . identifier[get_range] ( identifier[field] )
keyword[return] identifier[cls] . identifier[get_random_string] ( identifier[string_range] ) | def make_string_field_value(cls, field):
"""
String Field has three constraints (apart from anything
in the super class)
Args:
field (StringField): actual string field object from a
model declaration
Returns:
random string value
"""
if field.regex is not None:
raise NotImplementedError # depends on [control=['if'], data=[]]
string_range = cls.get_range(field)
return cls.get_random_string(string_range) |
def do_group(self, arg):
"""Creates a new analysis group with unique settings for plotting/tabulating etc.
or switches the active group to the specified name.
Sub-commands (first word of `arg`): list, duplicate, add, switch, remove.
"""
from copy import deepcopy
vals = arg.split()
if len(vals) == 0 or vals[0] not in ["list", "duplicate", "add", "switch", "remove"]:
self.help_group()  # unknown or missing sub-command: show usage and bail out
return
if vals[0] == "add":
if vals[1] not in self.args[self.active]:
self.args[self.active][vals[1]] = self._template_args.copy()  # seed the new group from the template settings
msg.okay("Created analysis group '{}'.".format(vals[1]))
self.do_group("switch {}".format(vals[1]))  # the newly added group becomes the active one
else:
msg.info("Group '{}' already exists. Switching to it.".format(vals[1]))
self.do_group("switch {}".format(vals[1]))
elif vals[0] == "switch":
if vals[1] in self.args[self.active]:
self.group = vals[1]
self._set_def_prompt()  # reflect the new active group in the shell prompt
else:
msg.warn("The group '{}' does not exist.".format(vals[1]))
elif vals[0] == "duplicate" and len(vals) == 3 and vals[1] in self.args[self.active]:
self.args[self.active][vals[2]] = deepcopy(self.args[self.active][vals[1]])  # deep copy so the two groups do not share nested settings
msg.okay("Duplicated analysis group '{}' into '{}'.".format(vals[1], vals[2]))
self.do_group("switch {}".format(vals[2]))
elif vals[0] == "list":
for key in self.args[self.active]:
msg.info(key)
elif vals[0] == "remove" and vals[1] in self.args[self.active]:
del self.args[self.active][vals[1]]
self.do_group("list") | def function[do_group, parameter[self, arg]]:
constant[Creates a new analysis group with unique settings for plotting/tabulating etc.
or switches the active group to the specified name.
]
from relative_module[copy] import module[deepcopy]
variable[vals] assign[=] call[name[arg].split, parameter[]]
if <ast.BoolOp object at 0x7da2054a52d0> begin[:]
call[name[self].help_group, parameter[]]
return[None]
if compare[call[name[vals]][constant[0]] equal[==] constant[add]] begin[:]
if compare[call[name[vals]][constant[1]] <ast.NotIn object at 0x7da2590d7190> call[name[self].args][name[self].active]] begin[:]
call[call[name[self].args][name[self].active]][call[name[vals]][constant[1]]] assign[=] call[name[self]._template_args.copy, parameter[]]
call[name[msg].okay, parameter[call[constant[Created analysis group '{}'.].format, parameter[call[name[vals]][constant[1]]]]]]
call[name[self].do_group, parameter[call[constant[switch {}].format, parameter[call[name[vals]][constant[1]]]]]] | keyword[def] identifier[do_group] ( identifier[self] , identifier[arg] ):
literal[string]
keyword[from] identifier[copy] keyword[import] identifier[deepcopy]
identifier[vals] = identifier[arg] . identifier[split] ()
keyword[if] identifier[len] ( identifier[vals] )== literal[int] keyword[or] identifier[vals] [ literal[int] ] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[self] . identifier[help_group] ()
keyword[return]
keyword[if] identifier[vals] [ literal[int] ]== literal[string] :
keyword[if] identifier[vals] [ literal[int] ] keyword[not] keyword[in] identifier[self] . identifier[args] [ identifier[self] . identifier[active] ]:
identifier[self] . identifier[args] [ identifier[self] . identifier[active] ][ identifier[vals] [ literal[int] ]]= identifier[self] . identifier[_template_args] . identifier[copy] ()
identifier[msg] . identifier[okay] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ]))
identifier[self] . identifier[do_group] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ]))
keyword[else] :
identifier[msg] . identifier[info] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ]))
identifier[self] . identifier[do_group] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ]))
keyword[elif] identifier[vals] [ literal[int] ]== literal[string] :
keyword[if] identifier[vals] [ literal[int] ] keyword[in] identifier[self] . identifier[args] [ identifier[self] . identifier[active] ]:
identifier[self] . identifier[group] = identifier[vals] [ literal[int] ]
identifier[self] . identifier[_set_def_prompt] ()
keyword[else] :
identifier[msg] . identifier[warn] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ]))
keyword[elif] identifier[vals] [ literal[int] ]== literal[string] keyword[and] identifier[len] ( identifier[vals] )== literal[int] keyword[and] identifier[vals] [ literal[int] ] keyword[in] identifier[self] . identifier[args] [ identifier[self] . identifier[active] ]:
identifier[self] . identifier[args] [ identifier[self] . identifier[active] ][ identifier[vals] [ literal[int] ]]= identifier[deepcopy] ( identifier[self] . identifier[args] [ identifier[self] . identifier[active] ][ identifier[vals] [ literal[int] ]])
identifier[msg] . identifier[okay] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ], identifier[vals] [ literal[int] ]))
identifier[self] . identifier[do_group] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ]))
keyword[elif] identifier[vals] [ literal[int] ]== literal[string] :
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[args] [ identifier[self] . identifier[active] ]:
identifier[msg] . identifier[info] ( identifier[key] )
keyword[elif] identifier[vals] [ literal[int] ]== literal[string] keyword[and] identifier[vals] [ literal[int] ] keyword[in] identifier[self] . identifier[args] [ identifier[self] . identifier[active] ]:
keyword[del] identifier[self] . identifier[args] [ identifier[self] . identifier[active] ][ identifier[vals] [ literal[int] ]]
identifier[self] . identifier[do_group] ( literal[string] ) | def do_group(self, arg):
"""Creates a new analysis group with unique settings for plotting/tabulating etc.
or switches the active group to the specified name.
"""
from copy import deepcopy
vals = arg.split()
if len(vals) == 0 or vals[0] not in ['list', 'duplicate', 'add', 'switch', 'remove']:
self.help_group()
return # depends on [control=['if'], data=[]]
if vals[0] == 'add':
if vals[1] not in self.args[self.active]:
self.args[self.active][vals[1]] = self._template_args.copy()
msg.okay("Created analysis group '{}'.".format(vals[1]))
self.do_group('switch {}'.format(vals[1])) # depends on [control=['if'], data=[]]
else:
msg.info("Group '{}' already exists. Switching to it.".format(vals[1]))
self.do_group('switch {}'.format(vals[1])) # depends on [control=['if'], data=[]]
elif vals[0] == 'switch':
if vals[1] in self.args[self.active]:
self.group = vals[1]
self._set_def_prompt() # depends on [control=['if'], data=[]]
else:
msg.warn("The group '{}' does not exist.".format(vals[1])) # depends on [control=['if'], data=[]]
elif vals[0] == 'duplicate' and len(vals) == 3 and (vals[1] in self.args[self.active]):
self.args[self.active][vals[2]] = deepcopy(self.args[self.active][vals[1]])
msg.okay("Duplicated analysis group '{}' into '{}'.".format(vals[1], vals[2]))
self.do_group('switch {}'.format(vals[2])) # depends on [control=['if'], data=[]]
elif vals[0] == 'list':
for key in self.args[self.active]:
msg.info(key) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]]
elif vals[0] == 'remove' and vals[1] in self.args[self.active]:
del self.args[self.active][vals[1]]
self.do_group('list') # depends on [control=['if'], data=[]] |
def write_points(self, series_name, start_date, end_date, resolution=10, batch_size=5000):
"""
Create sample datapoints between two dates with the given resolution (in seconds)
:param series_name:
:param start_date:
:param end_date:
:param resolution:
:param batch_size:
"""
start_ts = int(start_date.strftime("%s"))
end_ts = int(end_date.strftime("%s"))
range_seconds = end_ts - start_ts
num_datapoints = range_seconds / resolution
timestamps = [start_ts + i * resolution for i in range(num_datapoints)]
columns = ["time", "value"]
for batch in tqdm(self.batch(timestamps, batch_size)):
points = []
for timestamp in batch:
point = random.randint(1, 100)
points.append([timestamp, point])
datapoint = self.create_datapoint(series_name, columns, points)
self.client.write_points([datapoint]) | def function[write_points, parameter[self, series_name, start_date, end_date, resolution, batch_size]]:
constant[
Create sample datapoints between two dates with the given resolution (in seconds)
:param series_name:
:param start_date:
:param end_date:
:param resolution:
:param batch_size:
]
variable[start_ts] assign[=] call[name[int], parameter[call[name[start_date].strftime, parameter[constant[%s]]]]]
variable[end_ts] assign[=] call[name[int], parameter[call[name[end_date].strftime, parameter[constant[%s]]]]]
variable[range_seconds] assign[=] binary_operation[name[end_ts] - name[start_ts]]
variable[num_datapoints] assign[=] binary_operation[name[range_seconds] / name[resolution]]
variable[timestamps] assign[=] <ast.ListComp object at 0x7da1b00d8520>
variable[columns] assign[=] list[[<ast.Constant object at 0x7da1b00d97e0>, <ast.Constant object at 0x7da1b00d8ca0>]]
for taget[name[batch]] in starred[call[name[tqdm], parameter[call[name[self].batch, parameter[name[timestamps], name[batch_size]]]]]] begin[:]
variable[points] assign[=] list[[]]
for taget[name[timestamp]] in starred[name[batch]] begin[:]
variable[point] assign[=] call[name[random].randint, parameter[constant[1], constant[100]]]
call[name[points].append, parameter[list[[<ast.Name object at 0x7da1b00d85e0>, <ast.Name object at 0x7da1b00daec0>]]]]
variable[datapoint] assign[=] call[name[self].create_datapoint, parameter[name[series_name], name[columns], name[points]]]
call[name[self].client.write_points, parameter[list[[<ast.Name object at 0x7da1aff1cb20>]]]] | keyword[def] identifier[write_points] ( identifier[self] , identifier[series_name] , identifier[start_date] , identifier[end_date] , identifier[resolution] = literal[int] , identifier[batch_size] = literal[int] ):
literal[string]
identifier[start_ts] = identifier[int] ( identifier[start_date] . identifier[strftime] ( literal[string] ))
identifier[end_ts] = identifier[int] ( identifier[end_date] . identifier[strftime] ( literal[string] ))
identifier[range_seconds] = identifier[end_ts] - identifier[start_ts]
identifier[num_datapoints] = identifier[range_seconds] / identifier[resolution]
identifier[timestamps] =[ identifier[start_ts] + identifier[i] * identifier[resolution] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_datapoints] )]
identifier[columns] =[ literal[string] , literal[string] ]
keyword[for] identifier[batch] keyword[in] identifier[tqdm] ( identifier[self] . identifier[batch] ( identifier[timestamps] , identifier[batch_size] )):
identifier[points] =[]
keyword[for] identifier[timestamp] keyword[in] identifier[batch] :
identifier[point] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[points] . identifier[append] ([ identifier[timestamp] , identifier[point] ])
identifier[datapoint] = identifier[self] . identifier[create_datapoint] ( identifier[series_name] , identifier[columns] , identifier[points] )
identifier[self] . identifier[client] . identifier[write_points] ([ identifier[datapoint] ]) | def write_points(self, series_name, start_date, end_date, resolution=10, batch_size=5000):
"""
Create sample datapoints between two dates with the given resolution (in seconds)
:param series_name:
:param start_date:
:param end_date:
:param resolution:
:param batch_size:
"""
start_ts = int(start_date.strftime('%s'))
end_ts = int(end_date.strftime('%s'))
range_seconds = end_ts - start_ts
num_datapoints = range_seconds / resolution
timestamps = [start_ts + i * resolution for i in range(num_datapoints)]
columns = ['time', 'value']
for batch in tqdm(self.batch(timestamps, batch_size)):
points = []
for timestamp in batch:
point = random.randint(1, 100)
points.append([timestamp, point]) # depends on [control=['for'], data=['timestamp']]
datapoint = self.create_datapoint(series_name, columns, points)
self.client.write_points([datapoint]) # depends on [control=['for'], data=['batch']] |
def get_resizer(self, size, target_size):
'''Choose a resizer depending an image size.
Picks the horizontal resizer when width >= height * self.rate,
otherwise the vertical one. `target_size` is unused here; presumably
the caller passes it to the returned resizer -- TODO confirm.
'''
sw, sh = size  # source width / height
if sw >= sh * self.rate:
return self.hor_resize  # image is wide relative to the aspect threshold
else:
return self.vert_resize | def function[get_resizer, parameter[self, size, target_size]]:
constant[Choose a resizer depending an image size]
<ast.Tuple object at 0x7da18f00fc10> assign[=] name[size]
if compare[name[sw] greater_or_equal[>=] binary_operation[name[sh] * name[self].rate]] begin[:]
return[name[self].hor_resize] | keyword[def] identifier[get_resizer] ( identifier[self] , identifier[size] , identifier[target_size] ):
literal[string]
identifier[sw] , identifier[sh] = identifier[size]
keyword[if] identifier[sw] >= identifier[sh] * identifier[self] . identifier[rate] :
keyword[return] identifier[self] . identifier[hor_resize]
keyword[else] :
keyword[return] identifier[self] . identifier[vert_resize] | def get_resizer(self, size, target_size):
"""Choose a resizer depending an image size"""
(sw, sh) = size
if sw >= sh * self.rate:
return self.hor_resize # depends on [control=['if'], data=[]]
else:
return self.vert_resize |
def update_table(self, table_name, provisioned_throughput):
"""
Updates the provisioned throughput for a given table.
:type table_name: str
:param table_name: The name of the table to update.
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName': table_name,
'ProvisionedThroughput': provisioned_throughput}  # request payload matching the UpdateTable API shape
json_input = json.dumps(data)
return self.make_request('UpdateTable', json_input) | def function[update_table, parameter[self, table_name, provisioned_throughput]]:
constant[
Updates the provisioned throughput for a given table.
:type table_name: str
:param table_name: The name of the table to update.
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ee3e0>, <ast.Constant object at 0x7da1b26ed2d0>], [<ast.Name object at 0x7da1b26ee650>, <ast.Name object at 0x7da1b26ee290>]]
variable[json_input] assign[=] call[name[json].dumps, parameter[name[data]]]
return[call[name[self].make_request, parameter[constant[UpdateTable], name[json_input]]]] | keyword[def] identifier[update_table] ( identifier[self] , identifier[table_name] , identifier[provisioned_throughput] ):
literal[string]
identifier[data] ={ literal[string] : identifier[table_name] ,
literal[string] : identifier[provisioned_throughput] }
identifier[json_input] = identifier[json] . identifier[dumps] ( identifier[data] )
keyword[return] identifier[self] . identifier[make_request] ( literal[string] , identifier[json_input] ) | def update_table(self, table_name, provisioned_throughput):
"""
Updates the provisioned throughput for a given table.
:type table_name: str
:param table_name: The name of the table to update.
:type provisioned_throughput: dict
:param provisioned_throughput: A Python version of the
ProvisionedThroughput data structure defined by
DynamoDB.
"""
data = {'TableName': table_name, 'ProvisionedThroughput': provisioned_throughput}
json_input = json.dumps(data)
return self.make_request('UpdateTable', json_input) |
def rand_resize_crop(size:int, max_scale:float=2., ratios:Tuple[float,float]=(0.75,1.33)):
"Randomly resize and crop the image to a ratio in `ratios` after a zoom of `max_scale`."
# Returns a two-stage transform pipeline: a randomized zoom/squish/flip with
# random crop position, followed by a fixed-size crop. NOTE(review): the
# trailing 8 in each (lo, hi, 8) tuple looks like a per-batch draw count for
# the random parameter -- confirm against the transform API before relying on it.
return [zoom_squish(scale=(1.,max_scale,8), squish=(*ratios,8), invert=(0.5,8), row_pct=(0.,1.), col_pct=(0.,1.)),
crop(size=size)] | def function[rand_resize_crop, parameter[size, max_scale, ratios]]:
constant[Randomly resize and crop the image to a ratio in `ratios` after a zoom of `max_scale`.]
return[list[[<ast.Call object at 0x7da1b1dd82b0>, <ast.Call object at 0x7da1b1e98d00>]]] | keyword[def] identifier[rand_resize_crop] ( identifier[size] : identifier[int] , identifier[max_scale] : identifier[float] = literal[int] , identifier[ratios] : identifier[Tuple] [ identifier[float] , identifier[float] ]=( literal[int] , literal[int] )):
literal[string]
keyword[return] [ identifier[zoom_squish] ( identifier[scale] =( literal[int] , identifier[max_scale] , literal[int] ), identifier[squish] =(* identifier[ratios] , literal[int] ), identifier[invert] =( literal[int] , literal[int] ), identifier[row_pct] =( literal[int] , literal[int] ), identifier[col_pct] =( literal[int] , literal[int] )),
identifier[crop] ( identifier[size] = identifier[size] )] | def rand_resize_crop(size: int, max_scale: float=2.0, ratios: Tuple[float, float]=(0.75, 1.33)):
"""Randomly resize and crop the image to a ratio in `ratios` after a zoom of `max_scale`."""
return [zoom_squish(scale=(1.0, max_scale, 8), squish=(*ratios, 8), invert=(0.5, 8), row_pct=(0.0, 1.0), col_pct=(0.0, 1.0)), crop(size=size)] |
def _init_org(self):
""" Test and refresh credentials to the org specified.
Refreshes the OAuth token for self.org_config and, if the refresh
changed the stored config, writes the updated org back to the keychain.
"""
self.logger.info(
"Verifying and refreshing credentials for the specified org: {}.".format(
self.org_config.name
)
)
orig_config = self.org_config.config.copy()  # snapshot so we can detect changes made by the refresh
# attempt to refresh the token, this can throw...
self.org_config.refresh_oauth_token(self.project_config.keychain)
if self.org_config.config != orig_config:
self.logger.info("Org info has changed, updating org in keychain")
self.project_config.keychain.set_org(self.org_config) | def function[_init_org, parameter[self]]:
constant[ Test and refresh credentials to the org specified. ]
call[name[self].logger.info, parameter[call[constant[Verifying and refreshing credentials for the specified org: {}.].format, parameter[name[self].org_config.name]]]]
variable[orig_config] assign[=] call[name[self].org_config.config.copy, parameter[]]
call[name[self].org_config.refresh_oauth_token, parameter[name[self].project_config.keychain]]
if compare[name[self].org_config.config not_equal[!=] name[orig_config]] begin[:]
call[name[self].logger.info, parameter[constant[Org info has changed, updating org in keychain]]]
call[name[self].project_config.keychain.set_org, parameter[name[self].org_config]] | keyword[def] identifier[_init_org] ( identifier[self] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] (
literal[string] . identifier[format] (
identifier[self] . identifier[org_config] . identifier[name]
)
)
identifier[orig_config] = identifier[self] . identifier[org_config] . identifier[config] . identifier[copy] ()
identifier[self] . identifier[org_config] . identifier[refresh_oauth_token] ( identifier[self] . identifier[project_config] . identifier[keychain] )
keyword[if] identifier[self] . identifier[org_config] . identifier[config] != identifier[orig_config] :
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[project_config] . identifier[keychain] . identifier[set_org] ( identifier[self] . identifier[org_config] ) | def _init_org(self):
""" Test and refresh credentials to the org specified. """
self.logger.info('Verifying and refreshing credentials for the specified org: {}.'.format(self.org_config.name))
orig_config = self.org_config.config.copy()
# attempt to refresh the token, this can throw...
self.org_config.refresh_oauth_token(self.project_config.keychain)
if self.org_config.config != orig_config:
self.logger.info('Org info has changed, updating org in keychain')
self.project_config.keychain.set_org(self.org_config) # depends on [control=['if'], data=[]] |
def keytype_path_to(args, keytype):
"""
Get the local filename for a keyring type.
"admin" and "mon" have dedicated keyring names; any other value is
treated as a bootstrap keyring for that daemon type.
"""
if keytype == "admin":
return '{cluster}.client.admin.keyring'.format(
cluster=args.cluster)
if keytype == "mon":
return '{cluster}.mon.keyring'.format(
cluster=args.cluster)
return '{cluster}.bootstrap-{what}.keyring'.format(
cluster=args.cluster,
what=keytype) | def function[keytype_path_to, parameter[args, keytype]]:
constant[
Get the local filename for a keyring type
]
if compare[name[keytype] equal[==] constant[admin]] begin[:]
return[call[constant[{cluster}.client.admin.keyring].format, parameter[]]]
if compare[name[keytype] equal[==] constant[mon]] begin[:]
return[call[constant[{cluster}.mon.keyring].format, parameter[]]]
return[call[constant[{cluster}.bootstrap-{what}.keyring].format, parameter[]]] | keyword[def] identifier[keytype_path_to] ( identifier[args] , identifier[keytype] ):
literal[string]
keyword[if] identifier[keytype] == literal[string] :
keyword[return] literal[string] . identifier[format] (
identifier[cluster] = identifier[args] . identifier[cluster] )
keyword[if] identifier[keytype] == literal[string] :
keyword[return] literal[string] . identifier[format] (
identifier[cluster] = identifier[args] . identifier[cluster] )
keyword[return] literal[string] . identifier[format] (
identifier[cluster] = identifier[args] . identifier[cluster] ,
identifier[what] = identifier[keytype] ) | def keytype_path_to(args, keytype):
"""
Get the local filename for a keyring type
"""
if keytype == 'admin':
return '{cluster}.client.admin.keyring'.format(cluster=args.cluster) # depends on [control=['if'], data=[]]
if keytype == 'mon':
return '{cluster}.mon.keyring'.format(cluster=args.cluster) # depends on [control=['if'], data=[]]
return '{cluster}.bootstrap-{what}.keyring'.format(cluster=args.cluster, what=keytype) |
def generic_visit(self, node: AST, dfltChaining: bool = True) -> str:
"""Default handler, called if no explicit visitor function exists for
a node.
Recurses into every AST child of `node`, both direct field values and
entries of list-valued fields. `dfltChaining` is unused in this handler.
"""
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)  # non-AST list items (e.g. strings) are skipped
elif isinstance(value, AST):
self.visit(value) | def function[generic_visit, parameter[self, node, dfltChaining]]:
constant[Default handler, called if no explicit visitor function exists for
a node.
]
for taget[tuple[[<ast.Name object at 0x7da1b28bd8a0>, <ast.Name object at 0x7da1b28be1a0>]]] in starred[call[name[ast].iter_fields, parameter[name[node]]]] begin[:]
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
for taget[name[item]] in starred[name[value]] begin[:]
if call[name[isinstance], parameter[name[item], name[AST]]] begin[:]
call[name[self].visit, parameter[name[item]]] | keyword[def] identifier[generic_visit] ( identifier[self] , identifier[node] : identifier[AST] , identifier[dfltChaining] : identifier[bool] = keyword[True] )-> identifier[str] :
literal[string]
keyword[for] identifier[field] , identifier[value] keyword[in] identifier[ast] . identifier[iter_fields] ( identifier[node] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[for] identifier[item] keyword[in] identifier[value] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[AST] ):
identifier[self] . identifier[visit] ( identifier[item] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[AST] ):
identifier[self] . identifier[visit] ( identifier[value] ) | def generic_visit(self, node: AST, dfltChaining: bool=True) -> str:
"""Default handler, called if no explicit visitor function exists for
a node.
"""
for (field, value) in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
elif isinstance(value, AST):
self.visit(value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def gender(word, pos=NOUN):
""" Returns the gender (MASCULINE, FEMININE or NEUTER) for nouns (majority vote).
Returns None for words that are not nouns.
"""
w = word.lower()
if pos == NOUN:
# Default rules (baseline = 32%).
if w.endswith(gender_masculine):
return MASCULINE
if w.endswith(gender_feminine):
return FEMININE
if w.endswith(gender_neuter):
return NEUTER
# Majority vote.
for g in gender_majority_vote:
if w.endswith(gender_majority_vote[g]):
return g | def function[gender, parameter[word, pos]]:
constant[ Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote).
Returns None for words that are not nouns.
]
variable[w] assign[=] call[name[word].lower, parameter[]]
if compare[name[pos] equal[==] name[NOUN]] begin[:]
if call[name[w].endswith, parameter[name[gender_masculine]]] begin[:]
return[name[MASCULINE]]
if call[name[w].endswith, parameter[name[gender_feminine]]] begin[:]
return[name[FEMININE]]
if call[name[w].endswith, parameter[name[gender_neuter]]] begin[:]
return[name[NEUTER]]
for taget[name[g]] in starred[name[gender_majority_vote]] begin[:]
if call[name[w].endswith, parameter[call[name[gender_majority_vote]][name[g]]]] begin[:]
return[name[g]] | keyword[def] identifier[gender] ( identifier[word] , identifier[pos] = identifier[NOUN] ):
literal[string]
identifier[w] = identifier[word] . identifier[lower] ()
keyword[if] identifier[pos] == identifier[NOUN] :
keyword[if] identifier[w] . identifier[endswith] ( identifier[gender_masculine] ):
keyword[return] identifier[MASCULINE]
keyword[if] identifier[w] . identifier[endswith] ( identifier[gender_feminine] ):
keyword[return] identifier[FEMININE]
keyword[if] identifier[w] . identifier[endswith] ( identifier[gender_neuter] ):
keyword[return] identifier[NEUTER]
keyword[for] identifier[g] keyword[in] identifier[gender_majority_vote] :
keyword[if] identifier[w] . identifier[endswith] ( identifier[gender_majority_vote] [ identifier[g] ]):
keyword[return] identifier[g] | def gender(word, pos=NOUN):
""" Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote).
Returns None for words that are not nouns.
"""
w = word.lower()
if pos == NOUN:
# Default rules (baseline = 32%).
if w.endswith(gender_masculine):
return MASCULINE # depends on [control=['if'], data=[]]
if w.endswith(gender_feminine):
return FEMININE # depends on [control=['if'], data=[]]
if w.endswith(gender_neuter):
return NEUTER # depends on [control=['if'], data=[]]
# Majority vote.
for g in gender_majority_vote:
if w.endswith(gender_majority_vote[g]):
return g # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['g']] # depends on [control=['if'], data=[]] |
def save(self, projects):
"""Save the projects configs to local path
Args:
projects (dict): project_name -> project_data
"""
base_path = os.path.expanduser(self.path)
if not os.path.isdir(base_path):
return
logger.debug("Save projects config to %s", base_path)
for name, data in list(projects.items()):
project_file_path = self.get_project_config_path(name)
with open(project_file_path, "w") as f:
yaml.dump(data, stream = f, default_flow_style = False)
logger.debug("Project '%s' config has been writed to '%s'", name, project_file_path) | def function[save, parameter[self, projects]]:
constant[Save the projects configs to local path
Args:
projects (dict): project_name -> project_data
]
variable[base_path] assign[=] call[name[os].path.expanduser, parameter[name[self].path]]
if <ast.UnaryOp object at 0x7da1b034bf40> begin[:]
return[None]
call[name[logger].debug, parameter[constant[Save projects config to %s], name[base_path]]]
for taget[tuple[[<ast.Name object at 0x7da1b034a650>, <ast.Name object at 0x7da1b034acb0>]]] in starred[call[name[list], parameter[call[name[projects].items, parameter[]]]]] begin[:]
variable[project_file_path] assign[=] call[name[self].get_project_config_path, parameter[name[name]]]
with call[name[open], parameter[name[project_file_path], constant[w]]] begin[:]
call[name[yaml].dump, parameter[name[data]]]
call[name[logger].debug, parameter[constant[Project '%s' config has been writed to '%s'], name[name], name[project_file_path]]] | keyword[def] identifier[save] ( identifier[self] , identifier[projects] ):
literal[string]
identifier[base_path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[self] . identifier[path] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[base_path] ):
keyword[return]
identifier[logger] . identifier[debug] ( literal[string] , identifier[base_path] )
keyword[for] identifier[name] , identifier[data] keyword[in] identifier[list] ( identifier[projects] . identifier[items] ()):
identifier[project_file_path] = identifier[self] . identifier[get_project_config_path] ( identifier[name] )
keyword[with] identifier[open] ( identifier[project_file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[yaml] . identifier[dump] ( identifier[data] , identifier[stream] = identifier[f] , identifier[default_flow_style] = keyword[False] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[name] , identifier[project_file_path] ) | def save(self, projects):
"""Save the projects configs to local path
Args:
projects (dict): project_name -> project_data
"""
base_path = os.path.expanduser(self.path)
if not os.path.isdir(base_path):
return # depends on [control=['if'], data=[]]
logger.debug('Save projects config to %s', base_path)
for (name, data) in list(projects.items()):
project_file_path = self.get_project_config_path(name)
with open(project_file_path, 'w') as f:
yaml.dump(data, stream=f, default_flow_style=False)
logger.debug("Project '%s' config has been writed to '%s'", name, project_file_path) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=[]] |
def set_result(self, rval: bool) -> None:
""" Set the result of the evaluation. If the result is true, prune all of the children that didn't cut it
:param rval: Result of evaluation
"""
self.result = rval
if self.result:
self.nodes = [pn for pn in self.nodes if pn.result] | def function[set_result, parameter[self, rval]]:
constant[ Set the result of the evaluation. If the result is true, prune all of the children that didn't cut it
:param rval: Result of evaluation
]
name[self].result assign[=] name[rval]
if name[self].result begin[:]
name[self].nodes assign[=] <ast.ListComp object at 0x7da1b10e5240> | keyword[def] identifier[set_result] ( identifier[self] , identifier[rval] : identifier[bool] )-> keyword[None] :
literal[string]
identifier[self] . identifier[result] = identifier[rval]
keyword[if] identifier[self] . identifier[result] :
identifier[self] . identifier[nodes] =[ identifier[pn] keyword[for] identifier[pn] keyword[in] identifier[self] . identifier[nodes] keyword[if] identifier[pn] . identifier[result] ] | def set_result(self, rval: bool) -> None:
""" Set the result of the evaluation. If the result is true, prune all of the children that didn't cut it
:param rval: Result of evaluation
"""
self.result = rval
if self.result:
self.nodes = [pn for pn in self.nodes if pn.result] # depends on [control=['if'], data=[]] |
def to_latex(circuit, settings=None):
"""
Translates a given pyquil Program to a TikZ picture in a Latex document.
:param Program circuit: The circuit to be drawn, represented as a pyquil program.
:param dict settings: An optional dictionary with settings for drawing the circuit. See `get_default_settings`
in `latex_config` for more information about what settings should contain.
:return: LaTeX document string which can be compiled.
:rtype: string
"""
if settings is None:
settings = get_default_settings()
text = header(settings)
text += body(circuit, settings)
text += footer()
return text | def function[to_latex, parameter[circuit, settings]]:
constant[
Translates a given pyquil Program to a TikZ picture in a Latex document.
:param Program circuit: The circuit to be drawn, represented as a pyquil program.
:param dict settings: An optional dictionary with settings for drawing the circuit. See `get_default_settings`
in `latex_config` for more information about what settings should contain.
:return: LaTeX document string which can be compiled.
:rtype: string
]
if compare[name[settings] is constant[None]] begin[:]
variable[settings] assign[=] call[name[get_default_settings], parameter[]]
variable[text] assign[=] call[name[header], parameter[name[settings]]]
<ast.AugAssign object at 0x7da1b1bf8850>
<ast.AugAssign object at 0x7da1b1bfa710>
return[name[text]] | keyword[def] identifier[to_latex] ( identifier[circuit] , identifier[settings] = keyword[None] ):
literal[string]
keyword[if] identifier[settings] keyword[is] keyword[None] :
identifier[settings] = identifier[get_default_settings] ()
identifier[text] = identifier[header] ( identifier[settings] )
identifier[text] += identifier[body] ( identifier[circuit] , identifier[settings] )
identifier[text] += identifier[footer] ()
keyword[return] identifier[text] | def to_latex(circuit, settings=None):
"""
Translates a given pyquil Program to a TikZ picture in a Latex document.
:param Program circuit: The circuit to be drawn, represented as a pyquil program.
:param dict settings: An optional dictionary with settings for drawing the circuit. See `get_default_settings`
in `latex_config` for more information about what settings should contain.
:return: LaTeX document string which can be compiled.
:rtype: string
"""
if settings is None:
settings = get_default_settings() # depends on [control=['if'], data=['settings']]
text = header(settings)
text += body(circuit, settings)
text += footer()
return text |
def get_xgb_params(xgb_node):
"""
Retrieves parameters of a model.
"""
if hasattr(xgb_node, 'kwargs'):
# XGBoost >= 0.7
params = xgb_node.get_xgb_params()
else:
# XGBoost < 0.7
params = xgb_node.__dict__
return params | def function[get_xgb_params, parameter[xgb_node]]:
constant[
Retrieves parameters of a model.
]
if call[name[hasattr], parameter[name[xgb_node], constant[kwargs]]] begin[:]
variable[params] assign[=] call[name[xgb_node].get_xgb_params, parameter[]]
return[name[params]] | keyword[def] identifier[get_xgb_params] ( identifier[xgb_node] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[xgb_node] , literal[string] ):
identifier[params] = identifier[xgb_node] . identifier[get_xgb_params] ()
keyword[else] :
identifier[params] = identifier[xgb_node] . identifier[__dict__]
keyword[return] identifier[params] | def get_xgb_params(xgb_node):
"""
Retrieves parameters of a model.
"""
if hasattr(xgb_node, 'kwargs'):
# XGBoost >= 0.7
params = xgb_node.get_xgb_params() # depends on [control=['if'], data=[]]
else:
# XGBoost < 0.7
params = xgb_node.__dict__
return params |
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field.
"""
if not widget:
widget = self.field.widget
if DJANGO_VERSION > (1, 10):
# so that we can refer to the field when building the rendering context
widget._field = self.field
# Make sure that NgWidgetMixin is not already part of the widget's bases so it doesn't get added twice.
if not isinstance(widget, NgWidgetMixin):
widget.__class__ = type(widget.__class__.__name__, (NgWidgetMixin, widget.__class__), {})
return super(NgBoundField, self).as_widget(widget, attrs, only_initial) | def function[as_widget, parameter[self, widget, attrs, only_initial]]:
constant[
Renders the field.
]
if <ast.UnaryOp object at 0x7da207f99a20> begin[:]
variable[widget] assign[=] name[self].field.widget
if compare[name[DJANGO_VERSION] greater[>] tuple[[<ast.Constant object at 0x7da207f9a500>, <ast.Constant object at 0x7da207f9b040>]]] begin[:]
name[widget]._field assign[=] name[self].field
if <ast.UnaryOp object at 0x7da207f983a0> begin[:]
name[widget].__class__ assign[=] call[name[type], parameter[name[widget].__class__.__name__, tuple[[<ast.Name object at 0x7da207f992d0>, <ast.Attribute object at 0x7da207f9bb80>]], dictionary[[], []]]]
return[call[call[name[super], parameter[name[NgBoundField], name[self]]].as_widget, parameter[name[widget], name[attrs], name[only_initial]]]] | keyword[def] identifier[as_widget] ( identifier[self] , identifier[widget] = keyword[None] , identifier[attrs] = keyword[None] , identifier[only_initial] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[widget] :
identifier[widget] = identifier[self] . identifier[field] . identifier[widget]
keyword[if] identifier[DJANGO_VERSION] >( literal[int] , literal[int] ):
identifier[widget] . identifier[_field] = identifier[self] . identifier[field]
keyword[if] keyword[not] identifier[isinstance] ( identifier[widget] , identifier[NgWidgetMixin] ):
identifier[widget] . identifier[__class__] = identifier[type] ( identifier[widget] . identifier[__class__] . identifier[__name__] ,( identifier[NgWidgetMixin] , identifier[widget] . identifier[__class__] ),{})
keyword[return] identifier[super] ( identifier[NgBoundField] , identifier[self] ). identifier[as_widget] ( identifier[widget] , identifier[attrs] , identifier[only_initial] ) | def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field.
"""
if not widget:
widget = self.field.widget # depends on [control=['if'], data=[]]
if DJANGO_VERSION > (1, 10):
# so that we can refer to the field when building the rendering context
widget._field = self.field
# Make sure that NgWidgetMixin is not already part of the widget's bases so it doesn't get added twice.
if not isinstance(widget, NgWidgetMixin):
widget.__class__ = type(widget.__class__.__name__, (NgWidgetMixin, widget.__class__), {}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return super(NgBoundField, self).as_widget(widget, attrs, only_initial) |
def read(self, frames=-1, dtype='float64', always_2d=False,
fill_value=None, out=None):
"""Read from the file and return data as NumPy array.
Reads the given number of frames in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If ``frames < 0``, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from
``-2**31`` to ``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for
``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy (frames x channels) array is
returned. If the sound file has only one channel, a
one-dimensional array is returned. Use ``always_2d=True``
to return a two-dimensional array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is
smaller than the length of `out`) and no `fill_value` is
given, then only a part of `out` is overwritten and a view
containing all valid frames is returned. numpy.ndarray or
type(out)
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data
is always returned as a two-dimensional array, even if the
audio file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file,
the rest of the output is be filled with `fill_value`. If
`fill_value` is not specified, a smaller array is
returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored! If
`frames` is not given, it is obtained from the length of
`out`.
Examples
--------
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Reading 3 frames from a stereo file:
>>> myfile.read(3)
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
[ 0.67398441, -0.11516333]])
>>> myfile.close()
See Also
--------
buffer_read, .write
"""
if out is None:
frames = self._check_frames(frames, fill_value)
out = self._create_empty_array(frames, always_2d, dtype)
else:
if frames < 0 or frames > len(out):
frames = len(out)
frames = self._array_io('read', out, frames)
if len(out) > frames:
if fill_value is None:
out = out[:frames]
else:
out[frames:] = fill_value
return out | def function[read, parameter[self, frames, dtype, always_2d, fill_value, out]]:
constant[Read from the file and return data as NumPy array.
Reads the given number of frames in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If ``frames < 0``, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from
``-2**31`` to ``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for
``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy (frames x channels) array is
returned. If the sound file has only one channel, a
one-dimensional array is returned. Use ``always_2d=True``
to return a two-dimensional array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is
smaller than the length of `out`) and no `fill_value` is
given, then only a part of `out` is overwritten and a view
containing all valid frames is returned. numpy.ndarray or
type(out)
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data
is always returned as a two-dimensional array, even if the
audio file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file,
the rest of the output is be filled with `fill_value`. If
`fill_value` is not specified, a smaller array is
returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored! If
`frames` is not given, it is obtained from the length of
`out`.
Examples
--------
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Reading 3 frames from a stereo file:
>>> myfile.read(3)
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
[ 0.67398441, -0.11516333]])
>>> myfile.close()
See Also
--------
buffer_read, .write
]
if compare[name[out] is constant[None]] begin[:]
variable[frames] assign[=] call[name[self]._check_frames, parameter[name[frames], name[fill_value]]]
variable[out] assign[=] call[name[self]._create_empty_array, parameter[name[frames], name[always_2d], name[dtype]]]
variable[frames] assign[=] call[name[self]._array_io, parameter[constant[read], name[out], name[frames]]]
if compare[call[name[len], parameter[name[out]]] greater[>] name[frames]] begin[:]
if compare[name[fill_value] is constant[None]] begin[:]
variable[out] assign[=] call[name[out]][<ast.Slice object at 0x7da1b1a8e470>]
return[name[out]] | keyword[def] identifier[read] ( identifier[self] , identifier[frames] =- literal[int] , identifier[dtype] = literal[string] , identifier[always_2d] = keyword[False] ,
identifier[fill_value] = keyword[None] , identifier[out] = keyword[None] ):
literal[string]
keyword[if] identifier[out] keyword[is] keyword[None] :
identifier[frames] = identifier[self] . identifier[_check_frames] ( identifier[frames] , identifier[fill_value] )
identifier[out] = identifier[self] . identifier[_create_empty_array] ( identifier[frames] , identifier[always_2d] , identifier[dtype] )
keyword[else] :
keyword[if] identifier[frames] < literal[int] keyword[or] identifier[frames] > identifier[len] ( identifier[out] ):
identifier[frames] = identifier[len] ( identifier[out] )
identifier[frames] = identifier[self] . identifier[_array_io] ( literal[string] , identifier[out] , identifier[frames] )
keyword[if] identifier[len] ( identifier[out] )> identifier[frames] :
keyword[if] identifier[fill_value] keyword[is] keyword[None] :
identifier[out] = identifier[out] [: identifier[frames] ]
keyword[else] :
identifier[out] [ identifier[frames] :]= identifier[fill_value]
keyword[return] identifier[out] | def read(self, frames=-1, dtype='float64', always_2d=False, fill_value=None, out=None):
"""Read from the file and return data as NumPy array.
Reads the given number of frames in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If ``frames < 0``, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from
``-2**31`` to ``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for
``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy (frames x channels) array is
returned. If the sound file has only one channel, a
one-dimensional array is returned. Use ``always_2d=True``
to return a two-dimensional array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is
smaller than the length of `out`) and no `fill_value` is
given, then only a part of `out` is overwritten and a view
containing all valid frames is returned. numpy.ndarray or
type(out)
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data
is always returned as a two-dimensional array, even if the
audio file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file,
the rest of the output is be filled with `fill_value`. If
`fill_value` is not specified, a smaller array is
returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored! If
`frames` is not given, it is obtained from the length of
`out`.
Examples
--------
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Reading 3 frames from a stereo file:
>>> myfile.read(3)
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
[ 0.67398441, -0.11516333]])
>>> myfile.close()
See Also
--------
buffer_read, .write
"""
if out is None:
frames = self._check_frames(frames, fill_value)
out = self._create_empty_array(frames, always_2d, dtype) # depends on [control=['if'], data=['out']]
elif frames < 0 or frames > len(out):
frames = len(out) # depends on [control=['if'], data=[]]
frames = self._array_io('read', out, frames)
if len(out) > frames:
if fill_value is None:
out = out[:frames] # depends on [control=['if'], data=[]]
else:
out[frames:] = fill_value # depends on [control=['if'], data=['frames']]
return out |
def get_long_description():
"""
Returns the long description of HaTeMiLe for Python.
:return: The long description of HaTeMiLe for Python.
:rtype: str
"""
with open(
os.path.join(BASE_DIRECTORY, 'README.md'),
'r',
encoding='utf-8'
) as readme_file:
return readme_file.read() | def function[get_long_description, parameter[]]:
constant[
Returns the long description of HaTeMiLe for Python.
:return: The long description of HaTeMiLe for Python.
:rtype: str
]
with call[name[open], parameter[call[name[os].path.join, parameter[name[BASE_DIRECTORY], constant[README.md]]], constant[r]]] begin[:]
return[call[name[readme_file].read, parameter[]]] | keyword[def] identifier[get_long_description] ():
literal[string]
keyword[with] identifier[open] (
identifier[os] . identifier[path] . identifier[join] ( identifier[BASE_DIRECTORY] , literal[string] ),
literal[string] ,
identifier[encoding] = literal[string]
) keyword[as] identifier[readme_file] :
keyword[return] identifier[readme_file] . identifier[read] () | def get_long_description():
"""
Returns the long description of HaTeMiLe for Python.
:return: The long description of HaTeMiLe for Python.
:rtype: str
"""
with open(os.path.join(BASE_DIRECTORY, 'README.md'), 'r', encoding='utf-8') as readme_file:
return readme_file.read() # depends on [control=['with'], data=['readme_file']] |
def http_construct(args, unknown):
"""
Construct the --http <arg> from the args/unknown space -- relevant only for 'purl'.
:param args:
:param unknown:
:return:
"""
str_http = ''
b_httpSpecd = False
if '--http' in unknown:
try:
str_httpArg = unknown[unknown.index('--http')+1]
unknown.remove('--http')
unknown.remove(str_httpArg)
except:
str_httpArg = ""
str_http = '--http %s' % str_httpArg
b_httpSpecd = True
if not b_httpSpecd:
str_serverIP = "172.17.0.2"
str_serverPort = '5010'
try:
if args.b_pman:
str_serverIP = os.environ['PMAN_PORT_5010_TCP_ADDR']
str_serverPort = os.environ['PMAN_PORT_5010_TCP_PORT']
if args.b_pfioh:
str_serverIP = os.environ['PFIOH_PORT_5055_TCP_ADDR']
str_serverPort = os.environ['PFIOH_PORT_5055_TCP_PORT']
except:
pass
str_http = '--http %s:%s/api/v1/cmd/' % (str_serverIP, str_serverPort)
return str_http | def function[http_construct, parameter[args, unknown]]:
constant[
Construct the --http <arg> from the args/unknown space -- relevant only for 'purl'.
:param args:
:param unknown:
:return:
]
variable[str_http] assign[=] constant[]
variable[b_httpSpecd] assign[=] constant[False]
if compare[constant[--http] in name[unknown]] begin[:]
<ast.Try object at 0x7da204623d60>
variable[str_http] assign[=] binary_operation[constant[--http %s] <ast.Mod object at 0x7da2590d6920> name[str_httpArg]]
variable[b_httpSpecd] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da204620400> begin[:]
variable[str_serverIP] assign[=] constant[172.17.0.2]
variable[str_serverPort] assign[=] constant[5010]
<ast.Try object at 0x7da204621930>
variable[str_http] assign[=] binary_operation[constant[--http %s:%s/api/v1/cmd/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204623e50>, <ast.Name object at 0x7da2046211e0>]]]
return[name[str_http]] | keyword[def] identifier[http_construct] ( identifier[args] , identifier[unknown] ):
literal[string]
identifier[str_http] = literal[string]
identifier[b_httpSpecd] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[unknown] :
keyword[try] :
identifier[str_httpArg] = identifier[unknown] [ identifier[unknown] . identifier[index] ( literal[string] )+ literal[int] ]
identifier[unknown] . identifier[remove] ( literal[string] )
identifier[unknown] . identifier[remove] ( identifier[str_httpArg] )
keyword[except] :
identifier[str_httpArg] = literal[string]
identifier[str_http] = literal[string] % identifier[str_httpArg]
identifier[b_httpSpecd] = keyword[True]
keyword[if] keyword[not] identifier[b_httpSpecd] :
identifier[str_serverIP] = literal[string]
identifier[str_serverPort] = literal[string]
keyword[try] :
keyword[if] identifier[args] . identifier[b_pman] :
identifier[str_serverIP] = identifier[os] . identifier[environ] [ literal[string] ]
identifier[str_serverPort] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[if] identifier[args] . identifier[b_pfioh] :
identifier[str_serverIP] = identifier[os] . identifier[environ] [ literal[string] ]
identifier[str_serverPort] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[except] :
keyword[pass]
identifier[str_http] = literal[string] %( identifier[str_serverIP] , identifier[str_serverPort] )
keyword[return] identifier[str_http] | def http_construct(args, unknown):
"""
Construct the --http <arg> from the args/unknown space -- relevant only for 'purl'.
:param args:
:param unknown:
:return:
"""
str_http = ''
b_httpSpecd = False
if '--http' in unknown:
try:
str_httpArg = unknown[unknown.index('--http') + 1]
unknown.remove('--http')
unknown.remove(str_httpArg) # depends on [control=['try'], data=[]]
except:
str_httpArg = '' # depends on [control=['except'], data=[]]
str_http = '--http %s' % str_httpArg
b_httpSpecd = True # depends on [control=['if'], data=['unknown']]
if not b_httpSpecd:
str_serverIP = '172.17.0.2'
str_serverPort = '5010'
try:
if args.b_pman:
str_serverIP = os.environ['PMAN_PORT_5010_TCP_ADDR']
str_serverPort = os.environ['PMAN_PORT_5010_TCP_PORT'] # depends on [control=['if'], data=[]]
if args.b_pfioh:
str_serverIP = os.environ['PFIOH_PORT_5055_TCP_ADDR']
str_serverPort = os.environ['PFIOH_PORT_5055_TCP_PORT'] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
str_http = '--http %s:%s/api/v1/cmd/' % (str_serverIP, str_serverPort) # depends on [control=['if'], data=[]]
return str_http |
def rmon_alarm_entry_alarm_falling_event_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
alarm_entry = ET.SubElement(rmon, "alarm-entry")
alarm_index_key = ET.SubElement(alarm_entry, "alarm-index")
alarm_index_key.text = kwargs.pop('alarm_index')
alarm_falling_event_index = ET.SubElement(alarm_entry, "alarm-falling-event-index")
alarm_falling_event_index.text = kwargs.pop('alarm_falling_event_index')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[rmon_alarm_entry_alarm_falling_event_index, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[rmon] assign[=] call[name[ET].SubElement, parameter[name[config], constant[rmon]]]
variable[alarm_entry] assign[=] call[name[ET].SubElement, parameter[name[rmon], constant[alarm-entry]]]
variable[alarm_index_key] assign[=] call[name[ET].SubElement, parameter[name[alarm_entry], constant[alarm-index]]]
name[alarm_index_key].text assign[=] call[name[kwargs].pop, parameter[constant[alarm_index]]]
variable[alarm_falling_event_index] assign[=] call[name[ET].SubElement, parameter[name[alarm_entry], constant[alarm-falling-event-index]]]
name[alarm_falling_event_index].text assign[=] call[name[kwargs].pop, parameter[constant[alarm_falling_event_index]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[rmon_alarm_entry_alarm_falling_event_index] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[rmon] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[alarm_entry] = identifier[ET] . identifier[SubElement] ( identifier[rmon] , literal[string] )
identifier[alarm_index_key] = identifier[ET] . identifier[SubElement] ( identifier[alarm_entry] , literal[string] )
identifier[alarm_index_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[alarm_falling_event_index] = identifier[ET] . identifier[SubElement] ( identifier[alarm_entry] , literal[string] )
identifier[alarm_falling_event_index] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def rmon_alarm_entry_alarm_falling_event_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
rmon = ET.SubElement(config, 'rmon', xmlns='urn:brocade.com:mgmt:brocade-rmon')
alarm_entry = ET.SubElement(rmon, 'alarm-entry')
alarm_index_key = ET.SubElement(alarm_entry, 'alarm-index')
alarm_index_key.text = kwargs.pop('alarm_index')
alarm_falling_event_index = ET.SubElement(alarm_entry, 'alarm-falling-event-index')
alarm_falling_event_index.text = kwargs.pop('alarm_falling_event_index')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def black(cls):
"Make the text foreground color black."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
#wAttributes |= win32.FOREGROUND_BLACK
cls._set_text_attributes(wAttributes) | def function[black, parameter[cls]]:
constant[Make the text foreground color black.]
variable[wAttributes] assign[=] call[name[cls]._get_text_attributes, parameter[]]
<ast.AugAssign object at 0x7da1b06fba60>
call[name[cls]._set_text_attributes, parameter[name[wAttributes]]] | keyword[def] identifier[black] ( identifier[cls] ):
literal[string]
identifier[wAttributes] = identifier[cls] . identifier[_get_text_attributes] ()
identifier[wAttributes] &=~ identifier[win32] . identifier[FOREGROUND_MASK]
identifier[cls] . identifier[_set_text_attributes] ( identifier[wAttributes] ) | def black(cls):
"""Make the text foreground color black."""
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
#wAttributes |= win32.FOREGROUND_BLACK
cls._set_text_attributes(wAttributes) |
def size(self, size=None):
    """Returns or sets (if a value is provided) the diameter of the series'
    data points.

    :param Number size: If given, the series' size will be set to this.
    :rtype: ``Number``"""
    if size is not None:
        # Setter path: validate before storing, returns None.
        if not is_numeric(size):
            raise TypeError(
                "size must be number, not '%s'" % str(size)
            )
        self._size = size
    else:
        # Getter path.
        return self._size
constant[Returns or sets (if a value is provided) the diameter of the series'
data points.
:param Number size: If given, the series' size will be set to this.
:rtype: ``Number``]
if compare[name[size] is constant[None]] begin[:]
return[name[self]._size] | keyword[def] identifier[size] ( identifier[self] , identifier[size] = keyword[None] ):
literal[string]
keyword[if] identifier[size] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_size]
keyword[else] :
keyword[if] keyword[not] identifier[is_numeric] ( identifier[size] ):
keyword[raise] identifier[TypeError] (
literal[string] % identifier[str] ( identifier[size] )
)
identifier[self] . identifier[_size] = identifier[size] | def size(self, size=None):
"""Returns or sets (if a value is provided) the diameter of the series'
data points.
:param Number size: If given, the series' size will be set to this.
:rtype: ``Number``"""
if size is None:
return self._size # depends on [control=['if'], data=[]]
else:
if not is_numeric(size):
raise TypeError("size must be number, not '%s'" % str(size)) # depends on [control=['if'], data=[]]
self._size = size |
def PlistValueToPlainValue(plist):
  """Takes the plist contents generated by binplist and returns a plain dict.

  binplist uses rich types to express some of the plist types. We need to
  convert them to types that RDFValueArray will be able to transport.

  Args:
    plist: A plist to convert.

  Returns:
    A simple python type.
  """
  if isinstance(plist, dict):
    # Recursively flatten every value, keeping the original keys.
    return {key: PlistValueToPlainValue(value)
            for key, value in iteritems(plist)}
  if isinstance(plist, list):
    return [PlistValueToPlainValue(item) for item in plist]
  if isinstance(plist, datetime.datetime):
    # Encode timestamps as integer microseconds since the epoch (UTC).
    return calendar.timegm(plist.utctimetuple()) * 1000000 + plist.microsecond
  return plist
constant[Takes the plist contents generated by binplist and returns a plain dict.
binplist uses rich types to express some of the plist types. We need to
convert them to types that RDFValueArray will be able to transport.
Args:
plist: A plist to convert.
Returns:
A simple python type.
]
if call[name[isinstance], parameter[name[plist], name[dict]]] begin[:]
variable[ret_value] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1c3cbe0>, <ast.Name object at 0x7da1b1c3fb50>]]] in starred[call[name[iteritems], parameter[name[plist]]]] begin[:]
call[name[ret_value]][name[key]] assign[=] call[name[PlistValueToPlainValue], parameter[name[value]]]
return[name[ret_value]]
return[name[plist]] | keyword[def] identifier[PlistValueToPlainValue] ( identifier[plist] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[plist] , identifier[dict] ):
identifier[ret_value] = identifier[dict] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[plist] ):
identifier[ret_value] [ identifier[key] ]= identifier[PlistValueToPlainValue] ( identifier[value] )
keyword[return] identifier[ret_value]
keyword[elif] identifier[isinstance] ( identifier[plist] , identifier[list] ):
keyword[return] [ identifier[PlistValueToPlainValue] ( identifier[value] ) keyword[for] identifier[value] keyword[in] identifier[plist] ]
keyword[elif] identifier[isinstance] ( identifier[plist] , identifier[datetime] . identifier[datetime] ):
keyword[return] ( identifier[calendar] . identifier[timegm] ( identifier[plist] . identifier[utctimetuple] ())* literal[int] )+ identifier[plist] . identifier[microsecond]
keyword[return] identifier[plist] | def PlistValueToPlainValue(plist):
"""Takes the plist contents generated by binplist and returns a plain dict.
binplist uses rich types to express some of the plist types. We need to
convert them to types that RDFValueArray will be able to transport.
Args:
plist: A plist to convert.
Returns:
A simple python type.
"""
if isinstance(plist, dict):
ret_value = dict()
for (key, value) in iteritems(plist):
ret_value[key] = PlistValueToPlainValue(value) # depends on [control=['for'], data=[]]
return ret_value # depends on [control=['if'], data=[]]
elif isinstance(plist, list):
return [PlistValueToPlainValue(value) for value in plist] # depends on [control=['if'], data=[]]
elif isinstance(plist, datetime.datetime):
return calendar.timegm(plist.utctimetuple()) * 1000000 + plist.microsecond # depends on [control=['if'], data=[]]
return plist |
def send_report(self, report_parts):
    """
    Publish the report parts to local files. Each report part is a text
    with a title and specific extension. For html and plaintext sending
    the report part is unique, for csv send also the stats and unparsed
    string are plain text and report items are csv texts.
    """
    logger.info('Checking and creating the report directory')
    # Keep only the formats we publish, ordered as configured.
    report_parts = sorted(
        (part for part in report_parts if part.fmt in self.formats),
        key=lambda part: self.formats.index(part.fmt)
    )
    workdir = os.path.join(self.pubdir, self.dirname)
    if not os.path.isdir(workdir):
        try:
            os.makedirs(workdir)
        except OSError as e:
            # BUG FIX: the original format string used {0} twice, so the
            # log line repeated the directory name and never showed the
            # actual OSError.
            logger.error('Error creating directory "{0}": {1}'.format(workdir, e))
            return
    # Embed the part index in the name only when there are several parts.
    fmtname = '{0}-{1}-{2}.{3}' if len(report_parts) > 1 else '{0}-{2}.{3}'
    # report_parts is already filtered above; no need to filter again.
    for i, text_part in enumerate(report_parts):
        filename = fmtname.format(self.filename, i, socket.gethostname(), text_part.ext)
        repfile = os.path.join(workdir, filename)
        logger.info('Dumping the report part %d into %r', i, repfile)
        # Context manager guarantees the handle is closed even on error.
        with open(repfile, 'w') as fh:
            fh.write(text_part.text)
        print('Report part saved in: %r' % repfile)
    if self.notify:
        # Send a short notification mail pointing at the published report.
        logger.info('Creating an email message')
        email_address = self.config.get('main', 'email_address')
        smtp_server = self.config.get('main', 'smtp_server')
        publoc = os.path.join(self.pubroot, self.dirname)
        eml = MIMEText('New lograptor report is available at:\r\n{0}'.format(publoc))
        eml['Subject'] = '{0} system events: {1} (report notification)'.format(
            socket.gethostname(), time.strftime('%c', time.localtime())
        )
        eml['Date'] = formatdate()
        eml['From'] = email_address
        eml['To'] = ', '.join(self.notify)
        eml['X-Mailer'] = u'{0}-{1}'.format(package_name, __version__)
        mail_message(smtp_server, eml.as_string(), email_address, self.notify)
        print('Notification mailed to: {0}'.format(','.join(self.notify)))
    if self.rawlogs:
        # Archive the raw logs next to the report parts.
        logfilename = '{0}.log'.format(self.filename)
        logfile = os.path.join(workdir, '{0}.gz'.format(logfilename))
        logger.info('Gzipping logs and writing them to %r', logfilename)
        with open(logfile, 'w+b') as outfh:
            do_chunked_gzip(self.rawfh, outfh, logfilename)
        print('Gzipped logs saved in: {0}'.format(logfile))
    # Purge old reports
    self.prune_old()
constant[
Publish the report parts to local files. Each report part is a text
with a title and specific extension. For html and plaintext sending
the report part is unique, for csv send also the stats and unparsed
string are plain text and report items are csv texts.
]
call[name[logger].info, parameter[constant[Checking and creating the report directory]]]
variable[report_parts] assign[=] call[name[sorted], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da18f00ffa0>, name[report_parts]]]]]
variable[workdir] assign[=] call[name[os].path.join, parameter[name[self].pubdir, name[self].dirname]]
if <ast.UnaryOp object at 0x7da18f00f760> begin[:]
<ast.Try object at 0x7da18f00e380>
variable[fmtname] assign[=] <ast.IfExp object at 0x7da204962350>
for taget[tuple[[<ast.Name object at 0x7da204961d80>, <ast.Name object at 0x7da204960580>]]] in starred[call[name[enumerate], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da2049604f0>, name[report_parts]]]]]] begin[:]
variable[filename] assign[=] call[name[fmtname].format, parameter[name[self].filename, name[i], call[name[socket].gethostname, parameter[]], name[text_part].ext]]
variable[repfile] assign[=] call[name[os].path.join, parameter[name[workdir], name[filename]]]
call[name[logger].info, parameter[constant[Dumping the report part %d into %r], name[i], name[repfile]]]
variable[fh] assign[=] call[name[open], parameter[name[repfile], constant[w]]]
call[name[fh].write, parameter[name[text_part].text]]
call[name[fh].close, parameter[]]
call[name[print], parameter[binary_operation[constant[Report part saved in: %r] <ast.Mod object at 0x7da2590d6920> name[repfile]]]]
if name[self].notify begin[:]
call[name[logger].info, parameter[constant[Creating an email message]]]
variable[email_address] assign[=] call[name[self].config.get, parameter[constant[main], constant[email_address]]]
variable[smtp_server] assign[=] call[name[self].config.get, parameter[constant[main], constant[smtp_server]]]
variable[publoc] assign[=] call[name[os].path.join, parameter[name[self].pubroot, name[self].dirname]]
variable[eml] assign[=] call[name[MIMEText], parameter[call[constant[New lograptor report is available at:
{0}].format, parameter[name[publoc]]]]]
call[name[eml]][constant[Subject]] assign[=] call[constant[{0} system events: {1} (report notification)].format, parameter[call[name[socket].gethostname, parameter[]], call[name[time].strftime, parameter[constant[%c], call[name[time].localtime, parameter[]]]]]]
call[name[eml]][constant[Date]] assign[=] call[name[formatdate], parameter[]]
call[name[eml]][constant[From]] assign[=] name[email_address]
call[name[eml]][constant[To]] assign[=] call[constant[, ].join, parameter[name[self].notify]]
call[name[eml]][constant[X-Mailer]] assign[=] call[constant[{0}-{1}].format, parameter[name[package_name], name[__version__]]]
call[name[mail_message], parameter[name[smtp_server], call[name[eml].as_string, parameter[]], name[email_address], name[self].notify]]
call[name[print], parameter[call[constant[Notification mailed to: {0}].format, parameter[call[constant[,].join, parameter[name[self].notify]]]]]]
if name[self].rawlogs begin[:]
variable[logfilename] assign[=] call[constant[{0}.log].format, parameter[name[self].filename]]
variable[logfile] assign[=] call[name[os].path.join, parameter[name[workdir], call[constant[{0}.gz].format, parameter[name[logfilename]]]]]
call[name[logger].info, parameter[constant[Gzipping logs and writing them to %r], name[logfilename]]]
variable[outfh] assign[=] call[name[open], parameter[name[logfile], constant[w+b]]]
call[name[do_chunked_gzip], parameter[name[self].rawfh, name[outfh], name[logfilename]]]
call[name[outfh].close, parameter[]]
call[name[print], parameter[call[constant[Gzipped logs saved in: {0}].format, parameter[name[logfile]]]]]
call[name[self].prune_old, parameter[]] | keyword[def] identifier[send_report] ( identifier[self] , identifier[report_parts] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[report_parts] = identifier[sorted] (
identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] . identifier[fmt] keyword[in] identifier[self] . identifier[formats] , identifier[report_parts] ),
identifier[key] = keyword[lambda] identifier[x] : identifier[self] . identifier[formats] . identifier[index] ( identifier[x] . identifier[fmt] )
)
identifier[workdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[pubdir] , identifier[self] . identifier[dirname] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[workdir] ):
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[workdir] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[workdir] , identifier[e] ))
keyword[return]
identifier[fmtname] = literal[string] keyword[if] identifier[len] ( identifier[report_parts] )> literal[int] keyword[else] literal[string]
keyword[for] identifier[i] , identifier[text_part] keyword[in] identifier[enumerate] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] . identifier[fmt] keyword[in] identifier[self] . identifier[formats] , identifier[report_parts] )):
identifier[filename] = identifier[fmtname] . identifier[format] ( identifier[self] . identifier[filename] , identifier[i] , identifier[socket] . identifier[gethostname] (), identifier[text_part] . identifier[ext] )
identifier[repfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[workdir] , identifier[filename] )
identifier[logger] . identifier[info] ( literal[string] , identifier[i] , identifier[repfile] )
identifier[fh] = identifier[open] ( identifier[repfile] , literal[string] )
identifier[fh] . identifier[write] ( identifier[text_part] . identifier[text] )
identifier[fh] . identifier[close] ()
identifier[print] ( literal[string] % identifier[repfile] )
keyword[if] identifier[self] . identifier[notify] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[email_address] = identifier[self] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[smtp_server] = identifier[self] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[publoc] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[pubroot] , identifier[self] . identifier[dirname] )
identifier[eml] = identifier[MIMEText] ( literal[string] . identifier[format] ( identifier[publoc] ))
identifier[eml] [ literal[string] ]= literal[string] . identifier[format] (
identifier[socket] . identifier[gethostname] (), identifier[time] . identifier[strftime] ( literal[string] , identifier[time] . identifier[localtime] ())
)
identifier[eml] [ literal[string] ]= identifier[formatdate] ()
identifier[eml] [ literal[string] ]= identifier[email_address]
identifier[eml] [ literal[string] ]= literal[string] . identifier[join] ( identifier[self] . identifier[notify] )
identifier[eml] [ literal[string] ]= literal[string] . identifier[format] ( identifier[package_name] , identifier[__version__] )
identifier[mail_message] ( identifier[smtp_server] , identifier[eml] . identifier[as_string] (), identifier[email_address] , identifier[self] . identifier[notify] )
identifier[print] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[self] . identifier[notify] )))
keyword[if] identifier[self] . identifier[rawlogs] :
identifier[logfilename] = literal[string] . identifier[format] ( identifier[self] . identifier[filename] )
identifier[logfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[workdir] , literal[string] . identifier[format] ( identifier[logfilename] ))
identifier[logger] . identifier[info] ( literal[string] , identifier[logfilename] )
identifier[outfh] = identifier[open] ( identifier[logfile] , literal[string] )
identifier[do_chunked_gzip] ( identifier[self] . identifier[rawfh] , identifier[outfh] , identifier[logfilename] )
identifier[outfh] . identifier[close] ()
identifier[print] ( literal[string] . identifier[format] ( identifier[logfile] ))
identifier[self] . identifier[prune_old] () | def send_report(self, report_parts):
"""
Publish the report parts to local files. Each report part is a text
with a title and specific extension. For html and plaintext sending
the report part is unique, for csv send also the stats and unparsed
string are plain text and report items are csv texts.
"""
logger.info('Checking and creating the report directory')
report_parts = sorted(filter(lambda x: x.fmt in self.formats, report_parts), key=lambda x: self.formats.index(x.fmt))
workdir = os.path.join(self.pubdir, self.dirname)
if not os.path.isdir(workdir):
try:
os.makedirs(workdir) # depends on [control=['try'], data=[]]
except OSError as e:
logger.error('Error creating directory "{0}": {0}'.format(workdir, e))
return # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
fmtname = '{0}-{1}-{2}.{3}' if len(report_parts) > 1 else '{0}-{2}.{3}'
for (i, text_part) in enumerate(filter(lambda x: x.fmt in self.formats, report_parts)):
filename = fmtname.format(self.filename, i, socket.gethostname(), text_part.ext)
repfile = os.path.join(workdir, filename)
logger.info('Dumping the report part %d into %r', i, repfile)
fh = open(repfile, 'w')
fh.write(text_part.text)
fh.close()
print('Report part saved in: %r' % repfile) # depends on [control=['for'], data=[]]
if self.notify:
logger.info('Creating an email message')
email_address = self.config.get('main', 'email_address')
smtp_server = self.config.get('main', 'smtp_server')
publoc = os.path.join(self.pubroot, self.dirname)
eml = MIMEText('New lograptor report is available at:\r\n{0}'.format(publoc))
eml['Subject'] = '{0} system events: {1} (report notification)'.format(socket.gethostname(), time.strftime('%c', time.localtime()))
eml['Date'] = formatdate()
eml['From'] = email_address
eml['To'] = ', '.join(self.notify)
eml['X-Mailer'] = u'{0}-{1}'.format(package_name, __version__)
mail_message(smtp_server, eml.as_string(), email_address, self.notify)
print('Notification mailed to: {0}'.format(','.join(self.notify))) # depends on [control=['if'], data=[]]
if self.rawlogs:
logfilename = '{0}.log'.format(self.filename)
logfile = os.path.join(workdir, '{0}.gz'.format(logfilename))
logger.info('Gzipping logs and writing them to %r', logfilename)
outfh = open(logfile, 'w+b')
do_chunked_gzip(self.rawfh, outfh, logfilename)
outfh.close()
print('Gzipped logs saved in: {0}'.format(logfile)) # depends on [control=['if'], data=[]]
# Purge old reports
self.prune_old() |
def main(source):
    """
    For a given command line supplied argument, negotiate the content, parse
    the schema and then return any issues to stdout or if no schema issues,
    return success exit code.

    Returns 1 when no source was supplied, 0 on successful validation;
    raises ``click.ClickException`` when validation fails.
    """
    if source is None:
        # BUG FIX: the two adjacent string literals were concatenated
        # without a separating space, printing "...schema, forthe
        # validator..."; the phrase "a schema to a swagger schema" was
        # also garbled.
        click.echo(
            "You need to supply a file or url to a swagger schema, for "
            "the validator to work."
        )
        return 1
    try:
        load(source)
        click.echo("Validation passed")
        return 0
    except ValidationError as e:
        # Surface validation problems as a click error (non-zero exit).
        raise click.ClickException(str(e))
constant[
For a given command line supplied argument, negotiate the content, parse
the schema and then return any issues to stdout or if no schema issues,
return success exit code.
]
if compare[name[source] is constant[None]] begin[:]
call[name[click].echo, parameter[constant[You need to supply a file or url to a schema to a swagger schema, forthe validator to work.]]]
return[constant[1]]
<ast.Try object at 0x7da1b0de1de0> | keyword[def] identifier[main] ( identifier[source] ):
literal[string]
keyword[if] identifier[source] keyword[is] keyword[None] :
identifier[click] . identifier[echo] (
literal[string]
literal[string]
)
keyword[return] literal[int]
keyword[try] :
identifier[load] ( identifier[source] )
identifier[click] . identifier[echo] ( literal[string] )
keyword[return] literal[int]
keyword[except] identifier[ValidationError] keyword[as] identifier[e] :
keyword[raise] identifier[click] . identifier[ClickException] ( identifier[str] ( identifier[e] )) | def main(source):
"""
For a given command line supplied argument, negotiate the content, parse
the schema and then return any issues to stdout or if no schema issues,
return success exit code.
"""
if source is None:
click.echo('You need to supply a file or url to a schema to a swagger schema, forthe validator to work.')
return 1 # depends on [control=['if'], data=[]]
try:
load(source)
click.echo('Validation passed')
return 0 # depends on [control=['try'], data=[]]
except ValidationError as e:
raise click.ClickException(str(e)) # depends on [control=['except'], data=['e']] |
def _parse_01(ofiles, individual=False):
"""
a subfunction for summarizing results
"""
## parse results from outfiles
cols = []
dats = []
for ofile in ofiles:
## parse file
with open(ofile) as infile:
dat = infile.read()
lastbits = dat.split(".mcmc.txt\n\n")[1:]
results = lastbits[0].split("\n\n")[0].split()
## get shape from ...
shape = (((len(results) - 3) / 4), 4)
dat = np.array(results[3:]).reshape(shape)
cols.append(dat[:, 3].astype(float))
if not individual:
## get mean results across reps
cols = np.array(cols)
cols = cols.sum(axis=0) / len(ofiles) #10.
dat[:, 3] = cols.astype(str)
## format as a DF
df = pd.DataFrame(dat[:, 1:])
df.columns = ["delim", "prior", "posterior"]
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
df["nspecies"] = nspecies
return df
else:
## get mean results across reps
#return cols
res = []
for i in xrange(len(cols)):
x = dat
x[:, 3] = cols[i].astype(str)
x = pd.DataFrame(x[:, 1:])
x.columns = ['delim', 'prior', 'posterior']
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
x["nspecies"] = nspecies
res.append(x)
return res | def function[_parse_01, parameter[ofiles, individual]]:
constant[
a subfunction for summarizing results
]
variable[cols] assign[=] list[[]]
variable[dats] assign[=] list[[]]
for taget[name[ofile]] in starred[name[ofiles]] begin[:]
with call[name[open], parameter[name[ofile]]] begin[:]
variable[dat] assign[=] call[name[infile].read, parameter[]]
variable[lastbits] assign[=] call[call[name[dat].split, parameter[constant[.mcmc.txt
]]]][<ast.Slice object at 0x7da2044c0400>]
variable[results] assign[=] call[call[call[call[name[lastbits]][constant[0]].split, parameter[constant[
]]]][constant[0]].split, parameter[]]
variable[shape] assign[=] tuple[[<ast.BinOp object at 0x7da2044c1de0>, <ast.Constant object at 0x7da2044c2680>]]
variable[dat] assign[=] call[call[name[np].array, parameter[call[name[results]][<ast.Slice object at 0x7da2044c3670>]]].reshape, parameter[name[shape]]]
call[name[cols].append, parameter[call[call[name[dat]][tuple[[<ast.Slice object at 0x7da2044c2110>, <ast.Constant object at 0x7da2044c1420>]]].astype, parameter[name[float]]]]]
if <ast.UnaryOp object at 0x7da2044c05b0> begin[:]
variable[cols] assign[=] call[name[np].array, parameter[name[cols]]]
variable[cols] assign[=] binary_operation[call[name[cols].sum, parameter[]] / call[name[len], parameter[name[ofiles]]]]
call[name[dat]][tuple[[<ast.Slice object at 0x7da2044c3d60>, <ast.Constant object at 0x7da2044c3940>]]] assign[=] call[name[cols].astype, parameter[name[str]]]
variable[df] assign[=] call[name[pd].DataFrame, parameter[call[name[dat]][tuple[[<ast.Slice object at 0x7da2044c0e50>, <ast.Slice object at 0x7da2044c1db0>]]]]]
name[df].columns assign[=] list[[<ast.Constant object at 0x7da2044c3250>, <ast.Constant object at 0x7da2044c2800>, <ast.Constant object at 0x7da2044c0340>]]
variable[nspecies] assign[=] binary_operation[constant[1] + call[call[name[np].array, parameter[<ast.ListComp object at 0x7da20c6c79a0>]].sum, parameter[]]]
call[name[df]][constant[nspecies]] assign[=] name[nspecies]
return[name[df]] | keyword[def] identifier[_parse_01] ( identifier[ofiles] , identifier[individual] = keyword[False] ):
literal[string]
identifier[cols] =[]
identifier[dats] =[]
keyword[for] identifier[ofile] keyword[in] identifier[ofiles] :
keyword[with] identifier[open] ( identifier[ofile] ) keyword[as] identifier[infile] :
identifier[dat] = identifier[infile] . identifier[read] ()
identifier[lastbits] = identifier[dat] . identifier[split] ( literal[string] )[ literal[int] :]
identifier[results] = identifier[lastbits] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ()
identifier[shape] =((( identifier[len] ( identifier[results] )- literal[int] )/ literal[int] ), literal[int] )
identifier[dat] = identifier[np] . identifier[array] ( identifier[results] [ literal[int] :]). identifier[reshape] ( identifier[shape] )
identifier[cols] . identifier[append] ( identifier[dat] [:, literal[int] ]. identifier[astype] ( identifier[float] ))
keyword[if] keyword[not] identifier[individual] :
identifier[cols] = identifier[np] . identifier[array] ( identifier[cols] )
identifier[cols] = identifier[cols] . identifier[sum] ( identifier[axis] = literal[int] )/ identifier[len] ( identifier[ofiles] )
identifier[dat] [:, literal[int] ]= identifier[cols] . identifier[astype] ( identifier[str] )
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[dat] [:, literal[int] :])
identifier[df] . identifier[columns] =[ literal[string] , literal[string] , literal[string] ]
identifier[nspecies] = literal[int] + identifier[np] . identifier[array] ([ identifier[list] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[dat] [:, literal[int] ]], identifier[dtype] = identifier[int] ). identifier[sum] ( identifier[axis] = literal[int] )
identifier[df] [ literal[string] ]= identifier[nspecies]
keyword[return] identifier[df]
keyword[else] :
identifier[res] =[]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[cols] )):
identifier[x] = identifier[dat]
identifier[x] [:, literal[int] ]= identifier[cols] [ identifier[i] ]. identifier[astype] ( identifier[str] )
identifier[x] = identifier[pd] . identifier[DataFrame] ( identifier[x] [:, literal[int] :])
identifier[x] . identifier[columns] =[ literal[string] , literal[string] , literal[string] ]
identifier[nspecies] = literal[int] + identifier[np] . identifier[array] ([ identifier[list] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[dat] [:, literal[int] ]], identifier[dtype] = identifier[int] ). identifier[sum] ( identifier[axis] = literal[int] )
identifier[x] [ literal[string] ]= identifier[nspecies]
identifier[res] . identifier[append] ( identifier[x] )
keyword[return] identifier[res] | def _parse_01(ofiles, individual=False):
"""
a subfunction for summarizing results
"""
## parse results from outfiles
cols = []
dats = []
for ofile in ofiles:
## parse file
with open(ofile) as infile:
dat = infile.read() # depends on [control=['with'], data=['infile']]
lastbits = dat.split('.mcmc.txt\n\n')[1:]
results = lastbits[0].split('\n\n')[0].split()
## get shape from ...
shape = ((len(results) - 3) / 4, 4)
dat = np.array(results[3:]).reshape(shape)
cols.append(dat[:, 3].astype(float)) # depends on [control=['for'], data=['ofile']]
if not individual:
## get mean results across reps
cols = np.array(cols)
cols = cols.sum(axis=0) / len(ofiles) #10.
dat[:, 3] = cols.astype(str)
## format as a DF
df = pd.DataFrame(dat[:, 1:])
df.columns = ['delim', 'prior', 'posterior']
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
df['nspecies'] = nspecies
return df # depends on [control=['if'], data=[]]
else:
## get mean results across reps
#return cols
res = []
for i in xrange(len(cols)):
x = dat
x[:, 3] = cols[i].astype(str)
x = pd.DataFrame(x[:, 1:])
x.columns = ['delim', 'prior', 'posterior']
nspecies = 1 + np.array([list(i) for i in dat[:, 1]], dtype=int).sum(axis=1)
x['nspecies'] = nspecies
res.append(x) # depends on [control=['for'], data=['i']]
return res |
def _sub_hostname(self, line):
'''
This will replace the exact hostname and all instances of the domain name with the obfuscated alternatives.
Example:
'''
try:
for od,d in self.dn_db.items():
#regex = re.compile(r'\w*\.%s' % d)
regex = re.compile(r'(?![\W\-\:\ \.])[a-zA-Z0-9\-\_\.]*\.%s' % d)
hostnames = [each for each in regex.findall(line)]
if len(hostnames) > 0:
for hn in hostnames:
new_hn = self._hn2db(hn)
self.logger.debug("Obfuscating FQDN - %s > %s", hn, new_hn)
line = line.replace(hn, new_hn)
if self.hostname:
line = line.replace(self.hostname, self._hn2db(self.hostname)) #catch any non-fqdn instances of the system hostname
return line
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception('SubHostnameError: Unable to Substitute Hostname/Domainname') | def function[_sub_hostname, parameter[self, line]]:
constant[
This will replace the exact hostname and all instances of the domain name with the obfuscated alternatives.
Example:
]
<ast.Try object at 0x7da18dc98670> | keyword[def] identifier[_sub_hostname] ( identifier[self] , identifier[line] ):
literal[string]
keyword[try] :
keyword[for] identifier[od] , identifier[d] keyword[in] identifier[self] . identifier[dn_db] . identifier[items] ():
identifier[regex] = identifier[re] . identifier[compile] ( literal[string] % identifier[d] )
identifier[hostnames] =[ identifier[each] keyword[for] identifier[each] keyword[in] identifier[regex] . identifier[findall] ( identifier[line] )]
keyword[if] identifier[len] ( identifier[hostnames] )> literal[int] :
keyword[for] identifier[hn] keyword[in] identifier[hostnames] :
identifier[new_hn] = identifier[self] . identifier[_hn2db] ( identifier[hn] )
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] , identifier[hn] , identifier[new_hn] )
identifier[line] = identifier[line] . identifier[replace] ( identifier[hn] , identifier[new_hn] )
keyword[if] identifier[self] . identifier[hostname] :
identifier[line] = identifier[line] . identifier[replace] ( identifier[self] . identifier[hostname] , identifier[self] . identifier[_hn2db] ( identifier[self] . identifier[hostname] ))
keyword[return] identifier[line]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[exception] ( identifier[e] )
keyword[raise] identifier[Exception] ( literal[string] ) | def _sub_hostname(self, line):
"""
This will replace the exact hostname and all instances of the domain name with the obfuscated alternatives.
Example:
"""
try:
for (od, d) in self.dn_db.items():
#regex = re.compile(r'\w*\.%s' % d)
regex = re.compile('(?![\\W\\-\\:\\ \\.])[a-zA-Z0-9\\-\\_\\.]*\\.%s' % d)
hostnames = [each for each in regex.findall(line)]
if len(hostnames) > 0:
for hn in hostnames:
new_hn = self._hn2db(hn)
self.logger.debug('Obfuscating FQDN - %s > %s', hn, new_hn)
line = line.replace(hn, new_hn) # depends on [control=['for'], data=['hn']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if self.hostname:
line = line.replace(self.hostname, self._hn2db(self.hostname)) #catch any non-fqdn instances of the system hostname # depends on [control=['if'], data=[]]
return line # depends on [control=['try'], data=[]]
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception('SubHostnameError: Unable to Substitute Hostname/Domainname') # depends on [control=['except'], data=['e']] |
def get_command_hook(command, task_active=True):
    """ Gets registered command ``Plugin`` instance for the provided
    command.

    `command`
        Command string registered to a plugin.
    `task_active`
        Set to ``False`` to indicate no active tasks.

    Returns ``Plugin`` instance or ``None``.
    """
    plugin_obj = _command_hooks.get(command)
    if not plugin_obj:
        return None
    # Without an active task, only option-free, non-task-only plugins apply.
    if not task_active and (plugin_obj.options or plugin_obj.task_only):
        return None
    if _is_plugin_disabled(plugin_obj):
        return None
    return plugin_obj
constant[ Gets registered command ``Plugin`` instance for the provided
command.
`command`
Command string registered to a plugin.
`task_active`
Set to ``False`` to indicate no active tasks.
Returns ``Plugin`` instance or ``None``.
]
variable[plugin_obj] assign[=] call[name[_command_hooks].get, parameter[name[command]]]
if name[plugin_obj] begin[:]
if <ast.BoolOp object at 0x7da1b158a680> begin[:]
if <ast.UnaryOp object at 0x7da1b1589480> begin[:]
return[name[plugin_obj]]
return[constant[None]] | keyword[def] identifier[get_command_hook] ( identifier[command] , identifier[task_active] = keyword[True] ):
literal[string]
identifier[plugin_obj] = identifier[_command_hooks] . identifier[get] ( identifier[command] )
keyword[if] identifier[plugin_obj] :
keyword[if] identifier[task_active] keyword[or] ( keyword[not] identifier[plugin_obj] . identifier[options] keyword[and]
keyword[not] identifier[plugin_obj] . identifier[task_only] ):
keyword[if] keyword[not] identifier[_is_plugin_disabled] ( identifier[plugin_obj] ):
keyword[return] identifier[plugin_obj]
keyword[return] keyword[None] | def get_command_hook(command, task_active=True):
""" Gets registered command ``Plugin`` instance for the provided
command.
`command`
Command string registered to a plugin.
`task_active`
Set to ``False`` to indicate no active tasks.
Returns ``Plugin`` instance or ``None``.
"""
plugin_obj = _command_hooks.get(command)
if plugin_obj:
if task_active or (not plugin_obj.options and (not plugin_obj.task_only)):
if not _is_plugin_disabled(plugin_obj):
return plugin_obj # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return None |
def draw(canvas, mol):
"""Draw molecule structure image.
Args:
canvas: draw.drawable.Drawable
mol: model.graphmol.Compound
"""
mol.require("ScaleAndCenter")
mlb = mol.size2d[2]
if not mol.atom_count():
return
bond_type_fn = {
1: {
0: single_bond,
1: wedged_single,
2: dashed_wedged_single,
3: wave_single,
}, 2: {
0: cw_double,
1: counter_cw_double,
2: double_bond,
3: cross_double
}, 3: {
0: triple_bond
}
}
# Draw bonds
for u, v, bond in mol.bonds_iter():
if not bond.visible:
continue
if (u < v) == bond.is_lower_first:
f, s = (u, v)
else:
s, f = (u, v)
p1 = mol.atom(f).coords
p2 = mol.atom(s).coords
if p1 == p2:
continue # avoid zero division
if mol.atom(f).visible:
p1 = gm.t_seg(p1, p2, F_AOVL, 2)[0]
if mol.atom(s).visible:
p2 = gm.t_seg(p1, p2, F_AOVL, 1)[1]
color1 = mol.atom(f).color
color2 = mol.atom(s).color
bond_type_fn[bond.order][bond.type](
canvas, p1, p2, color1, color2, mlb)
# Draw atoms
for n, atom in mol.atoms_iter():
if not atom.visible:
continue
p = atom.coords
color = atom.color
# Determine text direction
if atom.H_count:
cosnbrs = []
hrzn = (p[0] + 1, p[1])
for nbr in mol.graph.neighbors(n):
pnbr = mol.atom(nbr).coords
try:
cosnbrs.append(gm.dot_product(hrzn, pnbr, p) /
gm.distance(p, pnbr))
except ZeroDivisionError:
pass
if not cosnbrs or min(cosnbrs) > 0:
# [atom]< or isolated node(ex. H2O, HCl)
text = atom.formula_html(True)
canvas.draw_text(p, text, color, "right")
continue
elif max(cosnbrs) < 0:
# >[atom]
text = atom.formula_html()
canvas.draw_text(p, text, color, "left")
continue
# -[atom]- or no hydrogens
text = atom.formula_html()
canvas.draw_text(p, text, color, "center") | def function[draw, parameter[canvas, mol]]:
constant[Draw molecule structure image.
Args:
canvas: draw.drawable.Drawable
mol: model.graphmol.Compound
]
call[name[mol].require, parameter[constant[ScaleAndCenter]]]
variable[mlb] assign[=] call[name[mol].size2d][constant[2]]
if <ast.UnaryOp object at 0x7da1b2428340> begin[:]
return[None]
variable[bond_type_fn] assign[=] dictionary[[<ast.Constant object at 0x7da1b24297b0>, <ast.Constant object at 0x7da1b2429840>, <ast.Constant object at 0x7da1b2429870>], [<ast.Dict object at 0x7da1b24298a0>, <ast.Dict object at 0x7da1b2429780>, <ast.Dict object at 0x7da1b242a260>]]
for taget[tuple[[<ast.Name object at 0x7da1b242a2f0>, <ast.Name object at 0x7da1b242a3e0>, <ast.Name object at 0x7da1b242a410>]]] in starred[call[name[mol].bonds_iter, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b242a3b0> begin[:]
continue
if compare[compare[name[u] less[<] name[v]] equal[==] name[bond].is_lower_first] begin[:]
<ast.Tuple object at 0x7da1b242a0e0> assign[=] tuple[[<ast.Name object at 0x7da1b242a050>, <ast.Name object at 0x7da1b242a080>]]
variable[p1] assign[=] call[name[mol].atom, parameter[name[f]]].coords
variable[p2] assign[=] call[name[mol].atom, parameter[name[s]]].coords
if compare[name[p1] equal[==] name[p2]] begin[:]
continue
if call[name[mol].atom, parameter[name[f]]].visible begin[:]
variable[p1] assign[=] call[call[name[gm].t_seg, parameter[name[p1], name[p2], name[F_AOVL], constant[2]]]][constant[0]]
if call[name[mol].atom, parameter[name[s]]].visible begin[:]
variable[p2] assign[=] call[call[name[gm].t_seg, parameter[name[p1], name[p2], name[F_AOVL], constant[1]]]][constant[1]]
variable[color1] assign[=] call[name[mol].atom, parameter[name[f]]].color
variable[color2] assign[=] call[name[mol].atom, parameter[name[s]]].color
call[call[call[name[bond_type_fn]][name[bond].order]][name[bond].type], parameter[name[canvas], name[p1], name[p2], name[color1], name[color2], name[mlb]]]
for taget[tuple[[<ast.Name object at 0x7da1b242b1f0>, <ast.Name object at 0x7da1b242b100>]]] in starred[call[name[mol].atoms_iter, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b242b250> begin[:]
continue
variable[p] assign[=] name[atom].coords
variable[color] assign[=] name[atom].color
if name[atom].H_count begin[:]
variable[cosnbrs] assign[=] list[[]]
variable[hrzn] assign[=] tuple[[<ast.BinOp object at 0x7da1b242b640>, <ast.Subscript object at 0x7da1b242b730>]]
for taget[name[nbr]] in starred[call[name[mol].graph.neighbors, parameter[name[n]]]] begin[:]
variable[pnbr] assign[=] call[name[mol].atom, parameter[name[nbr]]].coords
<ast.Try object at 0x7da1b242ba60>
if <ast.BoolOp object at 0x7da1b242bfd0> begin[:]
variable[text] assign[=] call[name[atom].formula_html, parameter[constant[True]]]
call[name[canvas].draw_text, parameter[name[p], name[text], name[color], constant[right]]]
continue
variable[text] assign[=] call[name[atom].formula_html, parameter[]]
call[name[canvas].draw_text, parameter[name[p], name[text], name[color], constant[center]]] | keyword[def] identifier[draw] ( identifier[canvas] , identifier[mol] ):
literal[string]
identifier[mol] . identifier[require] ( literal[string] )
identifier[mlb] = identifier[mol] . identifier[size2d] [ literal[int] ]
keyword[if] keyword[not] identifier[mol] . identifier[atom_count] ():
keyword[return]
identifier[bond_type_fn] ={
literal[int] :{
literal[int] : identifier[single_bond] ,
literal[int] : identifier[wedged_single] ,
literal[int] : identifier[dashed_wedged_single] ,
literal[int] : identifier[wave_single] ,
}, literal[int] :{
literal[int] : identifier[cw_double] ,
literal[int] : identifier[counter_cw_double] ,
literal[int] : identifier[double_bond] ,
literal[int] : identifier[cross_double]
}, literal[int] :{
literal[int] : identifier[triple_bond]
}
}
keyword[for] identifier[u] , identifier[v] , identifier[bond] keyword[in] identifier[mol] . identifier[bonds_iter] ():
keyword[if] keyword[not] identifier[bond] . identifier[visible] :
keyword[continue]
keyword[if] ( identifier[u] < identifier[v] )== identifier[bond] . identifier[is_lower_first] :
identifier[f] , identifier[s] =( identifier[u] , identifier[v] )
keyword[else] :
identifier[s] , identifier[f] =( identifier[u] , identifier[v] )
identifier[p1] = identifier[mol] . identifier[atom] ( identifier[f] ). identifier[coords]
identifier[p2] = identifier[mol] . identifier[atom] ( identifier[s] ). identifier[coords]
keyword[if] identifier[p1] == identifier[p2] :
keyword[continue]
keyword[if] identifier[mol] . identifier[atom] ( identifier[f] ). identifier[visible] :
identifier[p1] = identifier[gm] . identifier[t_seg] ( identifier[p1] , identifier[p2] , identifier[F_AOVL] , literal[int] )[ literal[int] ]
keyword[if] identifier[mol] . identifier[atom] ( identifier[s] ). identifier[visible] :
identifier[p2] = identifier[gm] . identifier[t_seg] ( identifier[p1] , identifier[p2] , identifier[F_AOVL] , literal[int] )[ literal[int] ]
identifier[color1] = identifier[mol] . identifier[atom] ( identifier[f] ). identifier[color]
identifier[color2] = identifier[mol] . identifier[atom] ( identifier[s] ). identifier[color]
identifier[bond_type_fn] [ identifier[bond] . identifier[order] ][ identifier[bond] . identifier[type] ](
identifier[canvas] , identifier[p1] , identifier[p2] , identifier[color1] , identifier[color2] , identifier[mlb] )
keyword[for] identifier[n] , identifier[atom] keyword[in] identifier[mol] . identifier[atoms_iter] ():
keyword[if] keyword[not] identifier[atom] . identifier[visible] :
keyword[continue]
identifier[p] = identifier[atom] . identifier[coords]
identifier[color] = identifier[atom] . identifier[color]
keyword[if] identifier[atom] . identifier[H_count] :
identifier[cosnbrs] =[]
identifier[hrzn] =( identifier[p] [ literal[int] ]+ literal[int] , identifier[p] [ literal[int] ])
keyword[for] identifier[nbr] keyword[in] identifier[mol] . identifier[graph] . identifier[neighbors] ( identifier[n] ):
identifier[pnbr] = identifier[mol] . identifier[atom] ( identifier[nbr] ). identifier[coords]
keyword[try] :
identifier[cosnbrs] . identifier[append] ( identifier[gm] . identifier[dot_product] ( identifier[hrzn] , identifier[pnbr] , identifier[p] )/
identifier[gm] . identifier[distance] ( identifier[p] , identifier[pnbr] ))
keyword[except] identifier[ZeroDivisionError] :
keyword[pass]
keyword[if] keyword[not] identifier[cosnbrs] keyword[or] identifier[min] ( identifier[cosnbrs] )> literal[int] :
identifier[text] = identifier[atom] . identifier[formula_html] ( keyword[True] )
identifier[canvas] . identifier[draw_text] ( identifier[p] , identifier[text] , identifier[color] , literal[string] )
keyword[continue]
keyword[elif] identifier[max] ( identifier[cosnbrs] )< literal[int] :
identifier[text] = identifier[atom] . identifier[formula_html] ()
identifier[canvas] . identifier[draw_text] ( identifier[p] , identifier[text] , identifier[color] , literal[string] )
keyword[continue]
identifier[text] = identifier[atom] . identifier[formula_html] ()
identifier[canvas] . identifier[draw_text] ( identifier[p] , identifier[text] , identifier[color] , literal[string] ) | def draw(canvas, mol):
"""Draw molecule structure image.
Args:
canvas: draw.drawable.Drawable
mol: model.graphmol.Compound
"""
mol.require('ScaleAndCenter')
mlb = mol.size2d[2]
if not mol.atom_count():
return # depends on [control=['if'], data=[]]
bond_type_fn = {1: {0: single_bond, 1: wedged_single, 2: dashed_wedged_single, 3: wave_single}, 2: {0: cw_double, 1: counter_cw_double, 2: double_bond, 3: cross_double}, 3: {0: triple_bond}}
# Draw bonds
for (u, v, bond) in mol.bonds_iter():
if not bond.visible:
continue # depends on [control=['if'], data=[]]
if (u < v) == bond.is_lower_first:
(f, s) = (u, v) # depends on [control=['if'], data=[]]
else:
(s, f) = (u, v)
p1 = mol.atom(f).coords
p2 = mol.atom(s).coords
if p1 == p2:
continue # avoid zero division # depends on [control=['if'], data=[]]
if mol.atom(f).visible:
p1 = gm.t_seg(p1, p2, F_AOVL, 2)[0] # depends on [control=['if'], data=[]]
if mol.atom(s).visible:
p2 = gm.t_seg(p1, p2, F_AOVL, 1)[1] # depends on [control=['if'], data=[]]
color1 = mol.atom(f).color
color2 = mol.atom(s).color
bond_type_fn[bond.order][bond.type](canvas, p1, p2, color1, color2, mlb) # depends on [control=['for'], data=[]]
# Draw atoms
for (n, atom) in mol.atoms_iter():
if not atom.visible:
continue # depends on [control=['if'], data=[]]
p = atom.coords
color = atom.color
# Determine text direction
if atom.H_count:
cosnbrs = []
hrzn = (p[0] + 1, p[1])
for nbr in mol.graph.neighbors(n):
pnbr = mol.atom(nbr).coords
try:
cosnbrs.append(gm.dot_product(hrzn, pnbr, p) / gm.distance(p, pnbr)) # depends on [control=['try'], data=[]]
except ZeroDivisionError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['nbr']]
if not cosnbrs or min(cosnbrs) > 0:
# [atom]< or isolated node(ex. H2O, HCl)
text = atom.formula_html(True)
canvas.draw_text(p, text, color, 'right')
continue # depends on [control=['if'], data=[]]
elif max(cosnbrs) < 0:
# >[atom]
text = atom.formula_html()
canvas.draw_text(p, text, color, 'left')
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# -[atom]- or no hydrogens
text = atom.formula_html()
canvas.draw_text(p, text, color, 'center') # depends on [control=['for'], data=[]] |
def doDynamicValidation(self, request: Request):
"""
State based validation
"""
self.execute_hook(NodeHooks.PRE_DYNAMIC_VALIDATION, request=request)
# Digest validation
ledger_id, seq_no = self.seqNoDB.get_by_payload_digest(request.payload_digest)
if ledger_id is not None and seq_no is not None:
raise SuspiciousPrePrepare('Trying to order already ordered request')
ledger = self.getLedger(self.ledger_id_for_request(request))
for txn in ledger.uncommittedTxns:
if get_payload_digest(txn) == request.payload_digest:
raise SuspiciousPrePrepare('Trying to order already ordered request')
operation = request.operation
req_handler = self.get_req_handler(txn_type=operation[TXN_TYPE])
req_handler.validate(request)
self.execute_hook(NodeHooks.POST_DYNAMIC_VALIDATION, request=request) | def function[doDynamicValidation, parameter[self, request]]:
constant[
State based validation
]
call[name[self].execute_hook, parameter[name[NodeHooks].PRE_DYNAMIC_VALIDATION]]
<ast.Tuple object at 0x7da204960490> assign[=] call[name[self].seqNoDB.get_by_payload_digest, parameter[name[request].payload_digest]]
if <ast.BoolOp object at 0x7da1b170c730> begin[:]
<ast.Raise object at 0x7da1b170cca0>
variable[ledger] assign[=] call[name[self].getLedger, parameter[call[name[self].ledger_id_for_request, parameter[name[request]]]]]
for taget[name[txn]] in starred[name[ledger].uncommittedTxns] begin[:]
if compare[call[name[get_payload_digest], parameter[name[txn]]] equal[==] name[request].payload_digest] begin[:]
<ast.Raise object at 0x7da1b170d3f0>
variable[operation] assign[=] name[request].operation
variable[req_handler] assign[=] call[name[self].get_req_handler, parameter[]]
call[name[req_handler].validate, parameter[name[request]]]
call[name[self].execute_hook, parameter[name[NodeHooks].POST_DYNAMIC_VALIDATION]] | keyword[def] identifier[doDynamicValidation] ( identifier[self] , identifier[request] : identifier[Request] ):
literal[string]
identifier[self] . identifier[execute_hook] ( identifier[NodeHooks] . identifier[PRE_DYNAMIC_VALIDATION] , identifier[request] = identifier[request] )
identifier[ledger_id] , identifier[seq_no] = identifier[self] . identifier[seqNoDB] . identifier[get_by_payload_digest] ( identifier[request] . identifier[payload_digest] )
keyword[if] identifier[ledger_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[seq_no] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[SuspiciousPrePrepare] ( literal[string] )
identifier[ledger] = identifier[self] . identifier[getLedger] ( identifier[self] . identifier[ledger_id_for_request] ( identifier[request] ))
keyword[for] identifier[txn] keyword[in] identifier[ledger] . identifier[uncommittedTxns] :
keyword[if] identifier[get_payload_digest] ( identifier[txn] )== identifier[request] . identifier[payload_digest] :
keyword[raise] identifier[SuspiciousPrePrepare] ( literal[string] )
identifier[operation] = identifier[request] . identifier[operation]
identifier[req_handler] = identifier[self] . identifier[get_req_handler] ( identifier[txn_type] = identifier[operation] [ identifier[TXN_TYPE] ])
identifier[req_handler] . identifier[validate] ( identifier[request] )
identifier[self] . identifier[execute_hook] ( identifier[NodeHooks] . identifier[POST_DYNAMIC_VALIDATION] , identifier[request] = identifier[request] ) | def doDynamicValidation(self, request: Request):
"""
State based validation
"""
self.execute_hook(NodeHooks.PRE_DYNAMIC_VALIDATION, request=request)
# Digest validation
(ledger_id, seq_no) = self.seqNoDB.get_by_payload_digest(request.payload_digest)
if ledger_id is not None and seq_no is not None:
raise SuspiciousPrePrepare('Trying to order already ordered request') # depends on [control=['if'], data=[]]
ledger = self.getLedger(self.ledger_id_for_request(request))
for txn in ledger.uncommittedTxns:
if get_payload_digest(txn) == request.payload_digest:
raise SuspiciousPrePrepare('Trying to order already ordered request') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['txn']]
operation = request.operation
req_handler = self.get_req_handler(txn_type=operation[TXN_TYPE])
req_handler.validate(request)
self.execute_hook(NodeHooks.POST_DYNAMIC_VALIDATION, request=request) |
def options(self, context, module_options):
'''
INJECT If set to true, this allows PowerView to work over 'stealthier' execution methods which have non-interactive contexts (e.g. WMI) (default: True)
'''
self.exec_methods = ['smbexec', 'atexec']
self.inject = True
if 'INJECT' in module_options:
self.inject = bool(module_options['INJECT'])
if self.inject: self.exec_methods = None
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('powersploit/Recon/PowerView.ps1') | def function[options, parameter[self, context, module_options]]:
constant[
INJECT If set to true, this allows PowerView to work over 'stealthier' execution methods which have non-interactive contexts (e.g. WMI) (default: True)
]
name[self].exec_methods assign[=] list[[<ast.Constant object at 0x7da1b2123430>, <ast.Constant object at 0x7da1b2121030>]]
name[self].inject assign[=] constant[True]
if compare[constant[INJECT] in name[module_options]] begin[:]
name[self].inject assign[=] call[name[bool], parameter[call[name[module_options]][constant[INJECT]]]]
if name[self].inject begin[:]
name[self].exec_methods assign[=] constant[None]
name[self].ps_script1 assign[=] call[name[obfs_ps_script], parameter[constant[cme_powershell_scripts/Invoke-PSInject.ps1]]]
name[self].ps_script2 assign[=] call[name[obfs_ps_script], parameter[constant[powersploit/Recon/PowerView.ps1]]] | keyword[def] identifier[options] ( identifier[self] , identifier[context] , identifier[module_options] ):
literal[string]
identifier[self] . identifier[exec_methods] =[ literal[string] , literal[string] ]
identifier[self] . identifier[inject] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[module_options] :
identifier[self] . identifier[inject] = identifier[bool] ( identifier[module_options] [ literal[string] ])
keyword[if] identifier[self] . identifier[inject] : identifier[self] . identifier[exec_methods] = keyword[None]
identifier[self] . identifier[ps_script1] = identifier[obfs_ps_script] ( literal[string] )
identifier[self] . identifier[ps_script2] = identifier[obfs_ps_script] ( literal[string] ) | def options(self, context, module_options):
"""
INJECT If set to true, this allows PowerView to work over 'stealthier' execution methods which have non-interactive contexts (e.g. WMI) (default: True)
"""
self.exec_methods = ['smbexec', 'atexec']
self.inject = True
if 'INJECT' in module_options:
self.inject = bool(module_options['INJECT']) # depends on [control=['if'], data=['module_options']]
if self.inject:
self.exec_methods = None # depends on [control=['if'], data=[]]
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('powersploit/Recon/PowerView.ps1') |
def fit(self):
"""
Do the fitting. Does least square fitting. If you want to use custom
fitting, must override this.
"""
# the objective function that will be minimized in the least square
# fitting
objective_func = lambda pars, x, y: y - self._func(x, pars)
self._params = self._initial_guess()
self.eos_params, ierr = leastsq(
objective_func, self._params, args=(self.volumes, self.energies))
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError("Optimal parameters not found") | def function[fit, parameter[self]]:
constant[
Do the fitting. Does least square fitting. If you want to use custom
fitting, must override this.
]
variable[objective_func] assign[=] <ast.Lambda object at 0x7da18dc9bf10>
name[self]._params assign[=] call[name[self]._initial_guess, parameter[]]
<ast.Tuple object at 0x7da18dc99a80> assign[=] call[name[leastsq], parameter[name[objective_func], name[self]._params]]
name[self]._params assign[=] name[self].eos_params
if compare[name[ierr] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18dc9ac20>, <ast.Constant object at 0x7da18dc98f10>, <ast.Constant object at 0x7da18dc9be80>, <ast.Constant object at 0x7da18dc9b9d0>]]] begin[:]
<ast.Raise object at 0x7da18dc98490> | keyword[def] identifier[fit] ( identifier[self] ):
literal[string]
identifier[objective_func] = keyword[lambda] identifier[pars] , identifier[x] , identifier[y] : identifier[y] - identifier[self] . identifier[_func] ( identifier[x] , identifier[pars] )
identifier[self] . identifier[_params] = identifier[self] . identifier[_initial_guess] ()
identifier[self] . identifier[eos_params] , identifier[ierr] = identifier[leastsq] (
identifier[objective_func] , identifier[self] . identifier[_params] , identifier[args] =( identifier[self] . identifier[volumes] , identifier[self] . identifier[energies] ))
identifier[self] . identifier[_params] = identifier[self] . identifier[eos_params]
keyword[if] identifier[ierr] keyword[not] keyword[in] [ literal[int] , literal[int] , literal[int] , literal[int] ]:
keyword[raise] identifier[EOSError] ( literal[string] ) | def fit(self):
"""
Do the fitting. Does least square fitting. If you want to use custom
fitting, must override this.
"""
# the objective function that will be minimized in the least square
# fitting
objective_func = lambda pars, x, y: y - self._func(x, pars)
self._params = self._initial_guess()
(self.eos_params, ierr) = leastsq(objective_func, self._params, args=(self.volumes, self.energies))
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError('Optimal parameters not found') # depends on [control=['if'], data=[]] |
def _flattenComponent(glyphSet, component):
"""Returns a list of tuples (baseGlyph, transform) of nested component."""
glyph = glyphSet[component.baseGlyph]
if not glyph.components:
transformation = Transform(*component.transformation)
return [(component.baseGlyph, transformation)]
all_flattened_components = []
for nested in glyph.components:
flattened_components = _flattenComponent(glyphSet, nested)
for i, (_, tr) in enumerate(flattened_components):
tr = tr.transform(component.transformation)
flattened_components[i] = (flattened_components[i][0], tr)
all_flattened_components.extend(flattened_components)
return all_flattened_components | def function[_flattenComponent, parameter[glyphSet, component]]:
constant[Returns a list of tuples (baseGlyph, transform) of nested component.]
variable[glyph] assign[=] call[name[glyphSet]][name[component].baseGlyph]
if <ast.UnaryOp object at 0x7da20c6e78e0> begin[:]
variable[transformation] assign[=] call[name[Transform], parameter[<ast.Starred object at 0x7da20c6e50f0>]]
return[list[[<ast.Tuple object at 0x7da20c6e6d40>]]]
variable[all_flattened_components] assign[=] list[[]]
for taget[name[nested]] in starred[name[glyph].components] begin[:]
variable[flattened_components] assign[=] call[name[_flattenComponent], parameter[name[glyphSet], name[nested]]]
for taget[tuple[[<ast.Name object at 0x7da20c6e6650>, <ast.Tuple object at 0x7da1b0efec20>]]] in starred[call[name[enumerate], parameter[name[flattened_components]]]] begin[:]
variable[tr] assign[=] call[name[tr].transform, parameter[name[component].transformation]]
call[name[flattened_components]][name[i]] assign[=] tuple[[<ast.Subscript object at 0x7da1b0efe620>, <ast.Name object at 0x7da1b0efd660>]]
call[name[all_flattened_components].extend, parameter[name[flattened_components]]]
return[name[all_flattened_components]] | keyword[def] identifier[_flattenComponent] ( identifier[glyphSet] , identifier[component] ):
literal[string]
identifier[glyph] = identifier[glyphSet] [ identifier[component] . identifier[baseGlyph] ]
keyword[if] keyword[not] identifier[glyph] . identifier[components] :
identifier[transformation] = identifier[Transform] (* identifier[component] . identifier[transformation] )
keyword[return] [( identifier[component] . identifier[baseGlyph] , identifier[transformation] )]
identifier[all_flattened_components] =[]
keyword[for] identifier[nested] keyword[in] identifier[glyph] . identifier[components] :
identifier[flattened_components] = identifier[_flattenComponent] ( identifier[glyphSet] , identifier[nested] )
keyword[for] identifier[i] ,( identifier[_] , identifier[tr] ) keyword[in] identifier[enumerate] ( identifier[flattened_components] ):
identifier[tr] = identifier[tr] . identifier[transform] ( identifier[component] . identifier[transformation] )
identifier[flattened_components] [ identifier[i] ]=( identifier[flattened_components] [ identifier[i] ][ literal[int] ], identifier[tr] )
identifier[all_flattened_components] . identifier[extend] ( identifier[flattened_components] )
keyword[return] identifier[all_flattened_components] | def _flattenComponent(glyphSet, component):
"""Returns a list of tuples (baseGlyph, transform) of nested component."""
glyph = glyphSet[component.baseGlyph]
if not glyph.components:
transformation = Transform(*component.transformation)
return [(component.baseGlyph, transformation)] # depends on [control=['if'], data=[]]
all_flattened_components = []
for nested in glyph.components:
flattened_components = _flattenComponent(glyphSet, nested)
for (i, (_, tr)) in enumerate(flattened_components):
tr = tr.transform(component.transformation)
flattened_components[i] = (flattened_components[i][0], tr) # depends on [control=['for'], data=[]]
all_flattened_components.extend(flattened_components) # depends on [control=['for'], data=['nested']]
return all_flattened_components |
def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update'
elif mode == 'w':
mode = 'recreate'
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = ''
df_['__index__' + name] = df_.index
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
if ['*' not in c for c in name_components]:
sep = '*'
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True)
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError("cannot open file {0}".format(path))
if not root_file.IsWritable():
raise IOError("file {0} is not writable".format(path))
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name)
current_dir.cd()
open_dirs.append(current_dir)
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close() | def function[to_root, parameter[df, path, key, mode, store_index]]:
constant[
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
]
if compare[name[mode] equal[==] constant[a]] begin[:]
variable[mode] assign[=] constant[update]
from relative_module[root_numpy] import module[array2tree]
variable[df_] assign[=] call[name[df].copy, parameter[]]
if name[store_index] begin[:]
variable[name] assign[=] name[df_].index.name
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] constant[]
call[name[df_]][binary_operation[constant[__index__] + name[name]]] assign[=] name[df_].index
for taget[name[col]] in starred[call[name[df_].select_dtypes, parameter[list[[<ast.Constant object at 0x7da18ede6170>]]]].columns] begin[:]
variable[name_components] assign[=] list[[<ast.Constant object at 0x7da18ede4520>, <ast.Name object at 0x7da18ede4460>, <ast.Call object at 0x7da18ede4250>]]
call[name[name_components].extend, parameter[call[name[df_]][name[col]].cat.categories]]
if <ast.ListComp object at 0x7da18ede6da0> begin[:]
variable[sep] assign[=] constant[*]
call[name[df_]][name[col]] assign[=] call[name[df_]][name[col]].cat.codes
call[name[df_].rename, parameter[]]
variable[arr] assign[=] call[name[df_].to_records, parameter[]]
variable[root_file] assign[=] call[name[ROOT].TFile.Open, parameter[name[path], name[mode]]]
if <ast.UnaryOp object at 0x7da18ede7700> begin[:]
<ast.Raise object at 0x7da18ede7eb0>
if <ast.UnaryOp object at 0x7da18ede7790> begin[:]
<ast.Raise object at 0x7da18ede5ed0>
variable[open_dirs] assign[=] list[[<ast.Name object at 0x7da18ede7e20>]]
for taget[name[dir_name]] in starred[call[call[name[key].split, parameter[constant[/]]]][<ast.Slice object at 0x7da18ede5750>]] begin[:]
variable[current_dir] assign[=] call[call[name[open_dirs]][<ast.UnaryOp object at 0x7da18ede6aa0>].Get, parameter[name[dir_name]]]
if <ast.UnaryOp object at 0x7da18ede5e10> begin[:]
variable[current_dir] assign[=] call[call[name[open_dirs]][<ast.UnaryOp object at 0x7da18ede6050>].mkdir, parameter[name[dir_name]]]
call[name[current_dir].cd, parameter[]]
call[name[open_dirs].append, parameter[name[current_dir]]]
variable[key] assign[=] call[call[name[key].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da18ede7220>]
variable[tree] assign[=] call[call[name[open_dirs]][<ast.UnaryOp object at 0x7da1b124e140>].Get, parameter[name[key]]]
if <ast.UnaryOp object at 0x7da1b124d780> begin[:]
variable[tree] assign[=] constant[None]
variable[tree] assign[=] call[name[array2tree], parameter[name[arr]]]
call[name[tree].Write, parameter[name[key], name[ROOT].TFile.kOverwrite]]
call[name[root_file].Close, parameter[]] | keyword[def] identifier[to_root] ( identifier[df] , identifier[path] , identifier[key] = literal[string] , identifier[mode] = literal[string] , identifier[store_index] = keyword[True] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[mode] == literal[string] :
identifier[mode] = literal[string]
keyword[elif] identifier[mode] == literal[string] :
identifier[mode] = literal[string]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[mode] ))
keyword[from] identifier[root_numpy] keyword[import] identifier[array2tree]
identifier[df_] = identifier[df] . identifier[copy] ( identifier[deep] = keyword[False] )
keyword[if] identifier[store_index] :
identifier[name] = identifier[df_] . identifier[index] . identifier[name]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = literal[string]
identifier[df_] [ literal[string] + identifier[name] ]= identifier[df_] . identifier[index]
keyword[for] identifier[col] keyword[in] identifier[df_] . identifier[select_dtypes] ([ literal[string] ]). identifier[columns] :
identifier[name_components] =[ literal[string] , identifier[col] , identifier[str] ( identifier[df_] [ identifier[col] ]. identifier[cat] . identifier[ordered] )]
identifier[name_components] . identifier[extend] ( identifier[df_] [ identifier[col] ]. identifier[cat] . identifier[categories] )
keyword[if] [ literal[string] keyword[not] keyword[in] identifier[c] keyword[for] identifier[c] keyword[in] identifier[name_components] ]:
identifier[sep] = literal[string]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[df_] [ identifier[col] ]= identifier[df_] [ identifier[col] ]. identifier[cat] . identifier[codes]
identifier[df_] . identifier[rename] ( identifier[index] = identifier[str] , identifier[columns] ={ identifier[col] : identifier[sep] . identifier[join] ( identifier[name_components] )}, identifier[inplace] = keyword[True] )
identifier[arr] = identifier[df_] . identifier[to_records] ( identifier[index] = keyword[False] )
identifier[root_file] = identifier[ROOT] . identifier[TFile] . identifier[Open] ( identifier[path] , identifier[mode] )
keyword[if] keyword[not] identifier[root_file] :
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[if] keyword[not] identifier[root_file] . identifier[IsWritable] ():
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[path] ))
identifier[open_dirs] =[ identifier[root_file] ]
keyword[for] identifier[dir_name] keyword[in] identifier[key] . identifier[split] ( literal[string] )[:- literal[int] ]:
identifier[current_dir] = identifier[open_dirs] [- literal[int] ]. identifier[Get] ( identifier[dir_name] )
keyword[if] keyword[not] identifier[current_dir] :
identifier[current_dir] = identifier[open_dirs] [- literal[int] ]. identifier[mkdir] ( identifier[dir_name] )
identifier[current_dir] . identifier[cd] ()
identifier[open_dirs] . identifier[append] ( identifier[current_dir] )
identifier[key] = identifier[key] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[tree] = identifier[open_dirs] [- literal[int] ]. identifier[Get] ( identifier[key] )
keyword[if] keyword[not] identifier[tree] :
identifier[tree] = keyword[None]
identifier[tree] = identifier[array2tree] ( identifier[arr] , identifier[name] = identifier[key] , identifier[tree] = identifier[tree] )
identifier[tree] . identifier[Write] ( identifier[key] , identifier[ROOT] . identifier[TFile] . identifier[kOverwrite] )
identifier[root_file] . identifier[Close] () | def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update' # depends on [control=['if'], data=['mode']]
elif mode == 'w':
mode = 'recreate' # depends on [control=['if'], data=['mode']]
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = '' # depends on [control=['if'], data=['name']]
df_['__index__' + name] = df_.index # depends on [control=['if'], data=[]]
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
if ['*' not in c for c in name_components]:
sep = '*' # depends on [control=['if'], data=[]]
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True) # depends on [control=['for'], data=['col']]
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError('cannot open file {0}'.format(path)) # depends on [control=['if'], data=[]]
if not root_file.IsWritable():
raise IOError('file {0} is not writable'.format(path)) # depends on [control=['if'], data=[]]
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name) # depends on [control=['if'], data=[]]
current_dir.cd()
open_dirs.append(current_dir) # depends on [control=['for'], data=['dir_name']]
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None # depends on [control=['if'], data=[]]
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close() |
def splitread(args):
"""
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
"""
p = OptionParser(splitread.__doc__)
p.add_option("-n", dest="n", default=76, type="int",
help="Split at N-th base position [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="Reverse complement second read [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
base = op.basename(pairsfastq).split(".")[0]
fq1 = base + ".1.fastq"
fq2 = base + ".2.fastq"
fw1 = must_open(fq1, "w")
fw2 = must_open(fq2, "w")
fp = must_open(pairsfastq)
n = opts.n
minsize = n * 8 / 5
for name, seq, qual in FastqGeneralIterator(fp):
if len(seq) < minsize:
logging.error("Skipping read {0}, length={1}".format(name, len(seq)))
continue
name = "@" + name
rec1 = FastqLite(name, seq[:n], qual[:n])
rec2 = FastqLite(name, seq[n:], qual[n:])
if opts.rc:
rec2.rc()
print(rec1, file=fw1)
print(rec2, file=fw2)
logging.debug("Reads split into `{0},{1}`".format(fq1, fq2))
fw1.close()
fw2.close() | def function[splitread, parameter[args]]:
constant[
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[splitread].__doc__]]
call[name[p].add_option, parameter[constant[-n]]]
call[name[p].add_option, parameter[constant[--rc]]]
<ast.Tuple object at 0x7da2041db7c0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da2041d82e0>]]
<ast.Tuple object at 0x7da2041da080> assign[=] name[args]
variable[base] assign[=] call[call[call[name[op].basename, parameter[name[pairsfastq]]].split, parameter[constant[.]]]][constant[0]]
variable[fq1] assign[=] binary_operation[name[base] + constant[.1.fastq]]
variable[fq2] assign[=] binary_operation[name[base] + constant[.2.fastq]]
variable[fw1] assign[=] call[name[must_open], parameter[name[fq1], constant[w]]]
variable[fw2] assign[=] call[name[must_open], parameter[name[fq2], constant[w]]]
variable[fp] assign[=] call[name[must_open], parameter[name[pairsfastq]]]
variable[n] assign[=] name[opts].n
variable[minsize] assign[=] binary_operation[binary_operation[name[n] * constant[8]] / constant[5]]
for taget[tuple[[<ast.Name object at 0x7da2041d8400>, <ast.Name object at 0x7da2041db9d0>, <ast.Name object at 0x7da2041d98d0>]]] in starred[call[name[FastqGeneralIterator], parameter[name[fp]]]] begin[:]
if compare[call[name[len], parameter[name[seq]]] less[<] name[minsize]] begin[:]
call[name[logging].error, parameter[call[constant[Skipping read {0}, length={1}].format, parameter[name[name], call[name[len], parameter[name[seq]]]]]]]
continue
variable[name] assign[=] binary_operation[constant[@] + name[name]]
variable[rec1] assign[=] call[name[FastqLite], parameter[name[name], call[name[seq]][<ast.Slice object at 0x7da2041d94e0>], call[name[qual]][<ast.Slice object at 0x7da2041da260>]]]
variable[rec2] assign[=] call[name[FastqLite], parameter[name[name], call[name[seq]][<ast.Slice object at 0x7da2041dba90>], call[name[qual]][<ast.Slice object at 0x7da2041db0a0>]]]
if name[opts].rc begin[:]
call[name[rec2].rc, parameter[]]
call[name[print], parameter[name[rec1]]]
call[name[print], parameter[name[rec2]]]
call[name[logging].debug, parameter[call[constant[Reads split into `{0},{1}`].format, parameter[name[fq1], name[fq2]]]]]
call[name[fw1].close, parameter[]]
call[name[fw2].close, parameter[]] | keyword[def] identifier[splitread] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[splitread] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[pairsfastq] ,= identifier[args]
identifier[base] = identifier[op] . identifier[basename] ( identifier[pairsfastq] ). identifier[split] ( literal[string] )[ literal[int] ]
identifier[fq1] = identifier[base] + literal[string]
identifier[fq2] = identifier[base] + literal[string]
identifier[fw1] = identifier[must_open] ( identifier[fq1] , literal[string] )
identifier[fw2] = identifier[must_open] ( identifier[fq2] , literal[string] )
identifier[fp] = identifier[must_open] ( identifier[pairsfastq] )
identifier[n] = identifier[opts] . identifier[n]
identifier[minsize] = identifier[n] * literal[int] / literal[int]
keyword[for] identifier[name] , identifier[seq] , identifier[qual] keyword[in] identifier[FastqGeneralIterator] ( identifier[fp] ):
keyword[if] identifier[len] ( identifier[seq] )< identifier[minsize] :
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[name] , identifier[len] ( identifier[seq] )))
keyword[continue]
identifier[name] = literal[string] + identifier[name]
identifier[rec1] = identifier[FastqLite] ( identifier[name] , identifier[seq] [: identifier[n] ], identifier[qual] [: identifier[n] ])
identifier[rec2] = identifier[FastqLite] ( identifier[name] , identifier[seq] [ identifier[n] :], identifier[qual] [ identifier[n] :])
keyword[if] identifier[opts] . identifier[rc] :
identifier[rec2] . identifier[rc] ()
identifier[print] ( identifier[rec1] , identifier[file] = identifier[fw1] )
identifier[print] ( identifier[rec2] , identifier[file] = identifier[fw2] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[fq1] , identifier[fq2] ))
identifier[fw1] . identifier[close] ()
identifier[fw2] . identifier[close] () | def splitread(args):
"""
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
"""
p = OptionParser(splitread.__doc__)
p.add_option('-n', dest='n', default=76, type='int', help='Split at N-th base position [default: %default]')
p.add_option('--rc', default=False, action='store_true', help='Reverse complement second read [default: %default]')
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(pairsfastq,) = args
base = op.basename(pairsfastq).split('.')[0]
fq1 = base + '.1.fastq'
fq2 = base + '.2.fastq'
fw1 = must_open(fq1, 'w')
fw2 = must_open(fq2, 'w')
fp = must_open(pairsfastq)
n = opts.n
minsize = n * 8 / 5
for (name, seq, qual) in FastqGeneralIterator(fp):
if len(seq) < minsize:
logging.error('Skipping read {0}, length={1}'.format(name, len(seq)))
continue # depends on [control=['if'], data=[]]
name = '@' + name
rec1 = FastqLite(name, seq[:n], qual[:n])
rec2 = FastqLite(name, seq[n:], qual[n:])
if opts.rc:
rec2.rc() # depends on [control=['if'], data=[]]
print(rec1, file=fw1)
print(rec2, file=fw2) # depends on [control=['for'], data=[]]
logging.debug('Reads split into `{0},{1}`'.format(fq1, fq2))
fw1.close()
fw2.close() |
def drain(self):
"""
Read until there is nothing more to be read. Only intended for test code/debugging!
@returns: True on success
@rtype: bool
"""
try:
unlock = self.stick.acquire()
return self.stick.drain()
finally:
unlock() | def function[drain, parameter[self]]:
constant[
Read until there is nothing more to be read. Only intended for test code/debugging!
@returns: True on success
@rtype: bool
]
<ast.Try object at 0x7da18c4cc400> | keyword[def] identifier[drain] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[unlock] = identifier[self] . identifier[stick] . identifier[acquire] ()
keyword[return] identifier[self] . identifier[stick] . identifier[drain] ()
keyword[finally] :
identifier[unlock] () | def drain(self):
"""
Read until there is nothing more to be read. Only intended for test code/debugging!
@returns: True on success
@rtype: bool
"""
try:
unlock = self.stick.acquire()
return self.stick.drain() # depends on [control=['try'], data=[]]
finally:
unlock() |
def build(self, pre=None, shortest=False):
"""Build this rule definition
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
"""
if pre is None:
pre = []
res = deque()
for value in self.values:
try:
res.append(utils.val(value, pre, shortest=shortest))
except errors.FlushGrams as e:
prev = "".join(res)
res.clear()
# this is assuming a scope was pushed!
if len(self.fuzzer._scope_stack) == 1:
pre.append(prev)
else:
stmts = self.fuzzer._curr_scope.setdefault("prev_append", deque())
stmts.extend(pre)
stmts.append(prev)
pre.clear()
continue
except errors.OptGram as e:
continue
except errors.GramFuzzError as e:
print("{} : {}".format(self.name, str(e)))
raise
return self.sep.join(res) | def function[build, parameter[self, pre, shortest]]:
constant[Build this rule definition
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
]
if compare[name[pre] is constant[None]] begin[:]
variable[pre] assign[=] list[[]]
variable[res] assign[=] call[name[deque], parameter[]]
for taget[name[value]] in starred[name[self].values] begin[:]
<ast.Try object at 0x7da1b0315540>
return[call[name[self].sep.join, parameter[name[res]]]] | keyword[def] identifier[build] ( identifier[self] , identifier[pre] = keyword[None] , identifier[shortest] = keyword[False] ):
literal[string]
keyword[if] identifier[pre] keyword[is] keyword[None] :
identifier[pre] =[]
identifier[res] = identifier[deque] ()
keyword[for] identifier[value] keyword[in] identifier[self] . identifier[values] :
keyword[try] :
identifier[res] . identifier[append] ( identifier[utils] . identifier[val] ( identifier[value] , identifier[pre] , identifier[shortest] = identifier[shortest] ))
keyword[except] identifier[errors] . identifier[FlushGrams] keyword[as] identifier[e] :
identifier[prev] = literal[string] . identifier[join] ( identifier[res] )
identifier[res] . identifier[clear] ()
keyword[if] identifier[len] ( identifier[self] . identifier[fuzzer] . identifier[_scope_stack] )== literal[int] :
identifier[pre] . identifier[append] ( identifier[prev] )
keyword[else] :
identifier[stmts] = identifier[self] . identifier[fuzzer] . identifier[_curr_scope] . identifier[setdefault] ( literal[string] , identifier[deque] ())
identifier[stmts] . identifier[extend] ( identifier[pre] )
identifier[stmts] . identifier[append] ( identifier[prev] )
identifier[pre] . identifier[clear] ()
keyword[continue]
keyword[except] identifier[errors] . identifier[OptGram] keyword[as] identifier[e] :
keyword[continue]
keyword[except] identifier[errors] . identifier[GramFuzzError] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[str] ( identifier[e] )))
keyword[raise]
keyword[return] identifier[self] . identifier[sep] . identifier[join] ( identifier[res] ) | def build(self, pre=None, shortest=False):
"""Build this rule definition
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
"""
if pre is None:
pre = [] # depends on [control=['if'], data=['pre']]
res = deque()
for value in self.values:
try:
res.append(utils.val(value, pre, shortest=shortest)) # depends on [control=['try'], data=[]]
except errors.FlushGrams as e:
prev = ''.join(res)
res.clear()
# this is assuming a scope was pushed!
if len(self.fuzzer._scope_stack) == 1:
pre.append(prev) # depends on [control=['if'], data=[]]
else:
stmts = self.fuzzer._curr_scope.setdefault('prev_append', deque())
stmts.extend(pre)
stmts.append(prev)
pre.clear()
continue # depends on [control=['except'], data=[]]
except errors.OptGram as e:
continue # depends on [control=['except'], data=[]]
except errors.GramFuzzError as e:
print('{} : {}'.format(self.name, str(e)))
raise # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['value']]
return self.sep.join(res) |
def handle_pong(self, msg):
"a heart just beat"
current = str_to_bytes(str(self.lifetime))
last = str_to_bytes(str(self.last_ping))
if msg[1] == current:
delta = time.time()-self.tic
# self.log.debug("heartbeat::heart %r took %.2f ms to respond"%(msg[0], 1000*delta))
self.responses.add(msg[0])
elif msg[1] == last:
delta = time.time()-self.tic + (self.lifetime-self.last_ping)
self.log.warn("heartbeat::heart %r missed a beat, and took %.2f ms to respond", msg[0], 1000*delta)
self.responses.add(msg[0])
else:
self.log.warn("heartbeat::got bad heartbeat (possibly old?): %s (current=%.3f)", msg[1], self.lifetime) | def function[handle_pong, parameter[self, msg]]:
constant[a heart just beat]
variable[current] assign[=] call[name[str_to_bytes], parameter[call[name[str], parameter[name[self].lifetime]]]]
variable[last] assign[=] call[name[str_to_bytes], parameter[call[name[str], parameter[name[self].last_ping]]]]
if compare[call[name[msg]][constant[1]] equal[==] name[current]] begin[:]
variable[delta] assign[=] binary_operation[call[name[time].time, parameter[]] - name[self].tic]
call[name[self].responses.add, parameter[call[name[msg]][constant[0]]]] | keyword[def] identifier[handle_pong] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[current] = identifier[str_to_bytes] ( identifier[str] ( identifier[self] . identifier[lifetime] ))
identifier[last] = identifier[str_to_bytes] ( identifier[str] ( identifier[self] . identifier[last_ping] ))
keyword[if] identifier[msg] [ literal[int] ]== identifier[current] :
identifier[delta] = identifier[time] . identifier[time] ()- identifier[self] . identifier[tic]
identifier[self] . identifier[responses] . identifier[add] ( identifier[msg] [ literal[int] ])
keyword[elif] identifier[msg] [ literal[int] ]== identifier[last] :
identifier[delta] = identifier[time] . identifier[time] ()- identifier[self] . identifier[tic] +( identifier[self] . identifier[lifetime] - identifier[self] . identifier[last_ping] )
identifier[self] . identifier[log] . identifier[warn] ( literal[string] , identifier[msg] [ literal[int] ], literal[int] * identifier[delta] )
identifier[self] . identifier[responses] . identifier[add] ( identifier[msg] [ literal[int] ])
keyword[else] :
identifier[self] . identifier[log] . identifier[warn] ( literal[string] , identifier[msg] [ literal[int] ], identifier[self] . identifier[lifetime] ) | def handle_pong(self, msg):
"""a heart just beat"""
current = str_to_bytes(str(self.lifetime))
last = str_to_bytes(str(self.last_ping))
if msg[1] == current:
delta = time.time() - self.tic
# self.log.debug("heartbeat::heart %r took %.2f ms to respond"%(msg[0], 1000*delta))
self.responses.add(msg[0]) # depends on [control=['if'], data=[]]
elif msg[1] == last:
delta = time.time() - self.tic + (self.lifetime - self.last_ping)
self.log.warn('heartbeat::heart %r missed a beat, and took %.2f ms to respond', msg[0], 1000 * delta)
self.responses.add(msg[0]) # depends on [control=['if'], data=[]]
else:
self.log.warn('heartbeat::got bad heartbeat (possibly old?): %s (current=%.3f)', msg[1], self.lifetime) |
def opt(self, x_init, f_fp=None, f=None, fp=None):
"""
Run the optimizer
"""
rcstrings = ['','Maximum number of iterations exceeded', 'Gradient and/or function calls not changing']
opt_dict = {}
if self.xtol is not None:
print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it")
if self.ftol is not None:
print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it")
if self.gtol is not None:
opt_dict['gtol'] = self.gtol
opt_result = optimize.fmin_bfgs(f, x_init, fp, disp=self.messages,
maxiter=self.max_iters, full_output=True, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[4]
self.status = rcstrings[opt_result[6]] | def function[opt, parameter[self, x_init, f_fp, f, fp]]:
constant[
Run the optimizer
]
variable[rcstrings] assign[=] list[[<ast.Constant object at 0x7da1b0d03d90>, <ast.Constant object at 0x7da1b0d01720>, <ast.Constant object at 0x7da1b0d03f40>]]
variable[opt_dict] assign[=] dictionary[[], []]
if compare[name[self].xtol is_not constant[None]] begin[:]
call[name[print], parameter[constant[WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it]]]
if compare[name[self].ftol is_not constant[None]] begin[:]
call[name[print], parameter[constant[WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it]]]
if compare[name[self].gtol is_not constant[None]] begin[:]
call[name[opt_dict]][constant[gtol]] assign[=] name[self].gtol
variable[opt_result] assign[=] call[name[optimize].fmin_bfgs, parameter[name[f], name[x_init], name[fp]]]
name[self].x_opt assign[=] call[name[opt_result]][constant[0]]
name[self].f_opt assign[=] call[call[name[f_fp], parameter[name[self].x_opt]]][constant[0]]
name[self].funct_eval assign[=] call[name[opt_result]][constant[4]]
name[self].status assign[=] call[name[rcstrings]][call[name[opt_result]][constant[6]]] | keyword[def] identifier[opt] ( identifier[self] , identifier[x_init] , identifier[f_fp] = keyword[None] , identifier[f] = keyword[None] , identifier[fp] = keyword[None] ):
literal[string]
identifier[rcstrings] =[ literal[string] , literal[string] , literal[string] ]
identifier[opt_dict] ={}
keyword[if] identifier[self] . identifier[xtol] keyword[is] keyword[not] keyword[None] :
identifier[print] ( literal[string] )
keyword[if] identifier[self] . identifier[ftol] keyword[is] keyword[not] keyword[None] :
identifier[print] ( literal[string] )
keyword[if] identifier[self] . identifier[gtol] keyword[is] keyword[not] keyword[None] :
identifier[opt_dict] [ literal[string] ]= identifier[self] . identifier[gtol]
identifier[opt_result] = identifier[optimize] . identifier[fmin_bfgs] ( identifier[f] , identifier[x_init] , identifier[fp] , identifier[disp] = identifier[self] . identifier[messages] ,
identifier[maxiter] = identifier[self] . identifier[max_iters] , identifier[full_output] = keyword[True] ,** identifier[opt_dict] )
identifier[self] . identifier[x_opt] = identifier[opt_result] [ literal[int] ]
identifier[self] . identifier[f_opt] = identifier[f_fp] ( identifier[self] . identifier[x_opt] )[ literal[int] ]
identifier[self] . identifier[funct_eval] = identifier[opt_result] [ literal[int] ]
identifier[self] . identifier[status] = identifier[rcstrings] [ identifier[opt_result] [ literal[int] ]] | def opt(self, x_init, f_fp=None, f=None, fp=None):
"""
Run the optimizer
"""
rcstrings = ['', 'Maximum number of iterations exceeded', 'Gradient and/or function calls not changing']
opt_dict = {}
if self.xtol is not None:
print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it") # depends on [control=['if'], data=[]]
if self.ftol is not None:
print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it") # depends on [control=['if'], data=[]]
if self.gtol is not None:
opt_dict['gtol'] = self.gtol # depends on [control=['if'], data=[]]
opt_result = optimize.fmin_bfgs(f, x_init, fp, disp=self.messages, maxiter=self.max_iters, full_output=True, **opt_dict)
self.x_opt = opt_result[0]
self.f_opt = f_fp(self.x_opt)[0]
self.funct_eval = opt_result[4]
self.status = rcstrings[opt_result[6]] |
def sense_ttb(self, target):
"""Sense for a Type B Target is not supported."""
info = "{device} does not support sense for Type B Target"
raise nfc.clf.UnsupportedTargetError(info.format(device=self)) | def function[sense_ttb, parameter[self, target]]:
constant[Sense for a Type B Target is not supported.]
variable[info] assign[=] constant[{device} does not support sense for Type B Target]
<ast.Raise object at 0x7da18ede4880> | keyword[def] identifier[sense_ttb] ( identifier[self] , identifier[target] ):
literal[string]
identifier[info] = literal[string]
keyword[raise] identifier[nfc] . identifier[clf] . identifier[UnsupportedTargetError] ( identifier[info] . identifier[format] ( identifier[device] = identifier[self] )) | def sense_ttb(self, target):
"""Sense for a Type B Target is not supported."""
info = '{device} does not support sense for Type B Target'
raise nfc.clf.UnsupportedTargetError(info.format(device=self)) |
def get_sensors_summary(self):
""" This returns a dict of sensor of the source and their values """
sub_title_list = self.get_sensor_list()
graph_vector_summary = OrderedDict()
for graph_idx, graph_data in enumerate(self.last_measurement):
val_str = str(round(graph_data, 1))
graph_vector_summary[sub_title_list[graph_idx]] = val_str
return graph_vector_summary | def function[get_sensors_summary, parameter[self]]:
constant[ This returns a dict of sensor of the source and their values ]
variable[sub_title_list] assign[=] call[name[self].get_sensor_list, parameter[]]
variable[graph_vector_summary] assign[=] call[name[OrderedDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18f810a90>, <ast.Name object at 0x7da18f810910>]]] in starred[call[name[enumerate], parameter[name[self].last_measurement]]] begin[:]
variable[val_str] assign[=] call[name[str], parameter[call[name[round], parameter[name[graph_data], constant[1]]]]]
call[name[graph_vector_summary]][call[name[sub_title_list]][name[graph_idx]]] assign[=] name[val_str]
return[name[graph_vector_summary]] | keyword[def] identifier[get_sensors_summary] ( identifier[self] ):
literal[string]
identifier[sub_title_list] = identifier[self] . identifier[get_sensor_list] ()
identifier[graph_vector_summary] = identifier[OrderedDict] ()
keyword[for] identifier[graph_idx] , identifier[graph_data] keyword[in] identifier[enumerate] ( identifier[self] . identifier[last_measurement] ):
identifier[val_str] = identifier[str] ( identifier[round] ( identifier[graph_data] , literal[int] ))
identifier[graph_vector_summary] [ identifier[sub_title_list] [ identifier[graph_idx] ]]= identifier[val_str]
keyword[return] identifier[graph_vector_summary] | def get_sensors_summary(self):
""" This returns a dict of sensor of the source and their values """
sub_title_list = self.get_sensor_list()
graph_vector_summary = OrderedDict()
for (graph_idx, graph_data) in enumerate(self.last_measurement):
val_str = str(round(graph_data, 1))
graph_vector_summary[sub_title_list[graph_idx]] = val_str # depends on [control=['for'], data=[]]
return graph_vector_summary |
def publish_if_new(cls):
"""If the station map has changed, publish the new information."""
message = cls.make_message()
if message != cls.last_message:
super(DashboardPubSub, cls).publish(message)
cls.last_message = message | def function[publish_if_new, parameter[cls]]:
constant[If the station map has changed, publish the new information.]
variable[message] assign[=] call[name[cls].make_message, parameter[]]
if compare[name[message] not_equal[!=] name[cls].last_message] begin[:]
call[call[name[super], parameter[name[DashboardPubSub], name[cls]]].publish, parameter[name[message]]]
name[cls].last_message assign[=] name[message] | keyword[def] identifier[publish_if_new] ( identifier[cls] ):
literal[string]
identifier[message] = identifier[cls] . identifier[make_message] ()
keyword[if] identifier[message] != identifier[cls] . identifier[last_message] :
identifier[super] ( identifier[DashboardPubSub] , identifier[cls] ). identifier[publish] ( identifier[message] )
identifier[cls] . identifier[last_message] = identifier[message] | def publish_if_new(cls):
"""If the station map has changed, publish the new information."""
message = cls.make_message()
if message != cls.last_message:
super(DashboardPubSub, cls).publish(message)
cls.last_message = message # depends on [control=['if'], data=['message']] |
def fit_gen(self, model, data, layer_opt, n_cycle, cycle_len=None, cycle_mult=1, cycle_save_name=None, best_save_name=None,
use_clr=None, use_clr_beta=None, metrics=None, callbacks=None, use_wd_sched=False, norm_wds=False,
wds_sched_mult=None, use_swa=False, swa_start=1, swa_eval_freq=5, **kwargs):
"""Method does some preparation before finally delegating to the 'fit' method for
fitting the model. Namely, if cycle_len is defined, it adds a 'Cosine Annealing'
scheduler for varying the learning rate across iterations.
Method also computes the total number of epochs to fit based on provided 'cycle_len',
'cycle_mult', and 'n_cycle' parameters.
Args:
model (Learner): Any neural architecture for solving a supported problem.
Eg. ResNet-34, RNN_Learner etc.
data (ModelData): An instance of ModelData.
layer_opt (LayerOptimizer): An instance of the LayerOptimizer class
n_cycle (int): number of cycles
cycle_len (int): number of epochs before lr is reset to the initial value.
E.g if cycle_len = 3, then the lr is varied between a maximum
and minimum value over 3 epochs.
cycle_mult (int): additional parameter for influencing how the lr resets over
the cycles. For an intuitive explanation, please see
https://github.com/fastai/fastai/blob/master/courses/dl1/lesson1.ipynb
cycle_save_name (str): use to save the weights at end of each cycle (requires
use_clr, use_clr_beta or cycle_len arg)
best_save_name (str): use to save weights of best model during training.
metrics (function): some function for evaluating a desired metric. Eg. accuracy.
callbacks (list(Callback)): callbacks to apply during the training.
use_wd_sched (bool, optional): set to True to enable weight regularization using
the technique mentioned in https://arxiv.org/abs/1711.05101. When this is True
alone (see below), the regularization is detached from gradient update and
applied directly to the weights.
norm_wds (bool, optional): when this is set to True along with use_wd_sched, the
regularization factor is normalized with each training cycle.
wds_sched_mult (function, optional): when this is provided along with use_wd_sched
as True, the value computed by this function is multiplied with the regularization
strength. This function is passed the WeightDecaySchedule object. And example
function that can be passed is:
f = lambda x: np.array(x.layer_opt.lrs) / x.init_lrs
use_swa (bool, optional): when this is set to True, it will enable the use of
Stochastic Weight Averaging (https://arxiv.org/abs/1803.05407). The learner will
include an additional model (in the swa_model attribute) for keeping track of the
average weights as described in the paper. All testing of this technique so far has
been in image classification, so use in other contexts is not guaranteed to work.
swa_start (int, optional): if use_swa is set to True, then this determines the epoch
to start keeping track of the average weights. It is 1-indexed per the paper's
conventions.
swa_eval_freq (int, optional): if use_swa is set to True, this determines the frequency
at which to evaluate the performance of the swa_model. This evaluation can be costly
for models using BatchNorm (requiring a full pass through the data), which is why the
default is not to evaluate after each epoch.
Returns:
None
"""
if cycle_save_name:
assert use_clr or use_clr_beta or cycle_len, "cycle_save_name argument requires either of the following arguments use_clr, use_clr_beta, cycle_len"
if callbacks is None: callbacks=[]
if metrics is None: metrics=self.metrics
if use_wd_sched:
# This needs to come before CosAnneal() because we need to read the initial learning rate from
# layer_opt.lrs - but CosAnneal() alters the layer_opt.lrs value initially (divides by 100)
if np.sum(layer_opt.wds) == 0:
print('fit() warning: use_wd_sched is set to True, but weight decay(s) passed are 0. Use wds to '
'pass weight decay values.')
batch_per_epoch = len(data.trn_dl)
cl = cycle_len if cycle_len else 1
self.wd_sched = WeightDecaySchedule(layer_opt, batch_per_epoch, cl, cycle_mult, n_cycle,
norm_wds, wds_sched_mult)
callbacks += [self.wd_sched]
if use_clr is not None:
clr_div,cut_div = use_clr[:2]
moms = use_clr[2:] if len(use_clr) > 2 else None
cycle_end = self.get_cycle_end(cycle_save_name)
assert cycle_len, "use_clr requires cycle_len arg"
self.sched = CircularLR(layer_opt, len(data.trn_dl)*cycle_len, on_cycle_end=cycle_end, div=clr_div, cut_div=cut_div,
momentums=moms)
elif use_clr_beta is not None:
div,pct = use_clr_beta[:2]
moms = use_clr_beta[2:] if len(use_clr_beta) > 3 else None
cycle_end = self.get_cycle_end(cycle_save_name)
assert cycle_len, "use_clr_beta requires cycle_len arg"
self.sched = CircularLR_beta(layer_opt, len(data.trn_dl)*cycle_len, on_cycle_end=cycle_end, div=div,
pct=pct, momentums=moms)
elif cycle_len:
cycle_end = self.get_cycle_end(cycle_save_name)
cycle_batches = len(data.trn_dl)*cycle_len
self.sched = CosAnneal(layer_opt, cycle_batches, on_cycle_end=cycle_end, cycle_mult=cycle_mult)
elif not self.sched: self.sched=LossRecorder(layer_opt)
callbacks+=[self.sched]
if best_save_name is not None:
callbacks+=[SaveBestModel(self, layer_opt, metrics, best_save_name)]
if use_swa:
# make a copy of the model to track average weights
self.swa_model = copy.deepcopy(model)
callbacks+=[SWA(model, self.swa_model, swa_start)]
n_epoch = int(sum_geom(cycle_len if cycle_len else 1, cycle_mult, n_cycle))
return fit(model, data, n_epoch, layer_opt.opt, self.crit,
metrics=metrics, callbacks=callbacks, reg_fn=self.reg_fn, clip=self.clip, fp16=self.fp16,
swa_model=self.swa_model if use_swa else None, swa_start=swa_start,
swa_eval_freq=swa_eval_freq, **kwargs) | def function[fit_gen, parameter[self, model, data, layer_opt, n_cycle, cycle_len, cycle_mult, cycle_save_name, best_save_name, use_clr, use_clr_beta, metrics, callbacks, use_wd_sched, norm_wds, wds_sched_mult, use_swa, swa_start, swa_eval_freq]]:
constant[Method does some preparation before finally delegating to the 'fit' method for
fitting the model. Namely, if cycle_len is defined, it adds a 'Cosine Annealing'
scheduler for varying the learning rate across iterations.
Method also computes the total number of epochs to fit based on provided 'cycle_len',
'cycle_mult', and 'n_cycle' parameters.
Args:
model (Learner): Any neural architecture for solving a supported problem.
Eg. ResNet-34, RNN_Learner etc.
data (ModelData): An instance of ModelData.
layer_opt (LayerOptimizer): An instance of the LayerOptimizer class
n_cycle (int): number of cycles
cycle_len (int): number of epochs before lr is reset to the initial value.
E.g if cycle_len = 3, then the lr is varied between a maximum
and minimum value over 3 epochs.
cycle_mult (int): additional parameter for influencing how the lr resets over
the cycles. For an intuitive explanation, please see
https://github.com/fastai/fastai/blob/master/courses/dl1/lesson1.ipynb
cycle_save_name (str): use to save the weights at end of each cycle (requires
use_clr, use_clr_beta or cycle_len arg)
best_save_name (str): use to save weights of best model during training.
metrics (function): some function for evaluating a desired metric. Eg. accuracy.
callbacks (list(Callback)): callbacks to apply during the training.
use_wd_sched (bool, optional): set to True to enable weight regularization using
the technique mentioned in https://arxiv.org/abs/1711.05101. When this is True
alone (see below), the regularization is detached from gradient update and
applied directly to the weights.
norm_wds (bool, optional): when this is set to True along with use_wd_sched, the
regularization factor is normalized with each training cycle.
wds_sched_mult (function, optional): when this is provided along with use_wd_sched
as True, the value computed by this function is multiplied with the regularization
strength. This function is passed the WeightDecaySchedule object. And example
function that can be passed is:
f = lambda x: np.array(x.layer_opt.lrs) / x.init_lrs
use_swa (bool, optional): when this is set to True, it will enable the use of
Stochastic Weight Averaging (https://arxiv.org/abs/1803.05407). The learner will
include an additional model (in the swa_model attribute) for keeping track of the
average weights as described in the paper. All testing of this technique so far has
been in image classification, so use in other contexts is not guaranteed to work.
swa_start (int, optional): if use_swa is set to True, then this determines the epoch
to start keeping track of the average weights. It is 1-indexed per the paper's
conventions.
swa_eval_freq (int, optional): if use_swa is set to True, this determines the frequency
at which to evaluate the performance of the swa_model. This evaluation can be costly
for models using BatchNorm (requiring a full pass through the data), which is why the
default is not to evaluate after each epoch.
Returns:
None
]
if name[cycle_save_name] begin[:]
assert[<ast.BoolOp object at 0x7da1b1e14b50>]
if compare[name[callbacks] is constant[None]] begin[:]
variable[callbacks] assign[=] list[[]]
if compare[name[metrics] is constant[None]] begin[:]
variable[metrics] assign[=] name[self].metrics
if name[use_wd_sched] begin[:]
if compare[call[name[np].sum, parameter[name[layer_opt].wds]] equal[==] constant[0]] begin[:]
call[name[print], parameter[constant[fit() warning: use_wd_sched is set to True, but weight decay(s) passed are 0. Use wds to pass weight decay values.]]]
variable[batch_per_epoch] assign[=] call[name[len], parameter[name[data].trn_dl]]
variable[cl] assign[=] <ast.IfExp object at 0x7da1b1e16e00>
name[self].wd_sched assign[=] call[name[WeightDecaySchedule], parameter[name[layer_opt], name[batch_per_epoch], name[cl], name[cycle_mult], name[n_cycle], name[norm_wds], name[wds_sched_mult]]]
<ast.AugAssign object at 0x7da1b1e167d0>
if compare[name[use_clr] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1e14d00> assign[=] call[name[use_clr]][<ast.Slice object at 0x7da1b1e162c0>]
variable[moms] assign[=] <ast.IfExp object at 0x7da1b1e15570>
variable[cycle_end] assign[=] call[name[self].get_cycle_end, parameter[name[cycle_save_name]]]
assert[name[cycle_len]]
name[self].sched assign[=] call[name[CircularLR], parameter[name[layer_opt], binary_operation[call[name[len], parameter[name[data].trn_dl]] * name[cycle_len]]]]
<ast.AugAssign object at 0x7da1b1e16bf0>
if compare[name[best_save_name] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b20281c0>
if name[use_swa] begin[:]
name[self].swa_model assign[=] call[name[copy].deepcopy, parameter[name[model]]]
<ast.AugAssign object at 0x7da1b2028ca0>
variable[n_epoch] assign[=] call[name[int], parameter[call[name[sum_geom], parameter[<ast.IfExp object at 0x7da1b20289d0>, name[cycle_mult], name[n_cycle]]]]]
return[call[name[fit], parameter[name[model], name[data], name[n_epoch], name[layer_opt].opt, name[self].crit]]] | keyword[def] identifier[fit_gen] ( identifier[self] , identifier[model] , identifier[data] , identifier[layer_opt] , identifier[n_cycle] , identifier[cycle_len] = keyword[None] , identifier[cycle_mult] = literal[int] , identifier[cycle_save_name] = keyword[None] , identifier[best_save_name] = keyword[None] ,
identifier[use_clr] = keyword[None] , identifier[use_clr_beta] = keyword[None] , identifier[metrics] = keyword[None] , identifier[callbacks] = keyword[None] , identifier[use_wd_sched] = keyword[False] , identifier[norm_wds] = keyword[False] ,
identifier[wds_sched_mult] = keyword[None] , identifier[use_swa] = keyword[False] , identifier[swa_start] = literal[int] , identifier[swa_eval_freq] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[cycle_save_name] :
keyword[assert] identifier[use_clr] keyword[or] identifier[use_clr_beta] keyword[or] identifier[cycle_len] , literal[string]
keyword[if] identifier[callbacks] keyword[is] keyword[None] : identifier[callbacks] =[]
keyword[if] identifier[metrics] keyword[is] keyword[None] : identifier[metrics] = identifier[self] . identifier[metrics]
keyword[if] identifier[use_wd_sched] :
keyword[if] identifier[np] . identifier[sum] ( identifier[layer_opt] . identifier[wds] )== literal[int] :
identifier[print] ( literal[string]
literal[string] )
identifier[batch_per_epoch] = identifier[len] ( identifier[data] . identifier[trn_dl] )
identifier[cl] = identifier[cycle_len] keyword[if] identifier[cycle_len] keyword[else] literal[int]
identifier[self] . identifier[wd_sched] = identifier[WeightDecaySchedule] ( identifier[layer_opt] , identifier[batch_per_epoch] , identifier[cl] , identifier[cycle_mult] , identifier[n_cycle] ,
identifier[norm_wds] , identifier[wds_sched_mult] )
identifier[callbacks] +=[ identifier[self] . identifier[wd_sched] ]
keyword[if] identifier[use_clr] keyword[is] keyword[not] keyword[None] :
identifier[clr_div] , identifier[cut_div] = identifier[use_clr] [: literal[int] ]
identifier[moms] = identifier[use_clr] [ literal[int] :] keyword[if] identifier[len] ( identifier[use_clr] )> literal[int] keyword[else] keyword[None]
identifier[cycle_end] = identifier[self] . identifier[get_cycle_end] ( identifier[cycle_save_name] )
keyword[assert] identifier[cycle_len] , literal[string]
identifier[self] . identifier[sched] = identifier[CircularLR] ( identifier[layer_opt] , identifier[len] ( identifier[data] . identifier[trn_dl] )* identifier[cycle_len] , identifier[on_cycle_end] = identifier[cycle_end] , identifier[div] = identifier[clr_div] , identifier[cut_div] = identifier[cut_div] ,
identifier[momentums] = identifier[moms] )
keyword[elif] identifier[use_clr_beta] keyword[is] keyword[not] keyword[None] :
identifier[div] , identifier[pct] = identifier[use_clr_beta] [: literal[int] ]
identifier[moms] = identifier[use_clr_beta] [ literal[int] :] keyword[if] identifier[len] ( identifier[use_clr_beta] )> literal[int] keyword[else] keyword[None]
identifier[cycle_end] = identifier[self] . identifier[get_cycle_end] ( identifier[cycle_save_name] )
keyword[assert] identifier[cycle_len] , literal[string]
identifier[self] . identifier[sched] = identifier[CircularLR_beta] ( identifier[layer_opt] , identifier[len] ( identifier[data] . identifier[trn_dl] )* identifier[cycle_len] , identifier[on_cycle_end] = identifier[cycle_end] , identifier[div] = identifier[div] ,
identifier[pct] = identifier[pct] , identifier[momentums] = identifier[moms] )
keyword[elif] identifier[cycle_len] :
identifier[cycle_end] = identifier[self] . identifier[get_cycle_end] ( identifier[cycle_save_name] )
identifier[cycle_batches] = identifier[len] ( identifier[data] . identifier[trn_dl] )* identifier[cycle_len]
identifier[self] . identifier[sched] = identifier[CosAnneal] ( identifier[layer_opt] , identifier[cycle_batches] , identifier[on_cycle_end] = identifier[cycle_end] , identifier[cycle_mult] = identifier[cycle_mult] )
keyword[elif] keyword[not] identifier[self] . identifier[sched] : identifier[self] . identifier[sched] = identifier[LossRecorder] ( identifier[layer_opt] )
identifier[callbacks] +=[ identifier[self] . identifier[sched] ]
keyword[if] identifier[best_save_name] keyword[is] keyword[not] keyword[None] :
identifier[callbacks] +=[ identifier[SaveBestModel] ( identifier[self] , identifier[layer_opt] , identifier[metrics] , identifier[best_save_name] )]
keyword[if] identifier[use_swa] :
identifier[self] . identifier[swa_model] = identifier[copy] . identifier[deepcopy] ( identifier[model] )
identifier[callbacks] +=[ identifier[SWA] ( identifier[model] , identifier[self] . identifier[swa_model] , identifier[swa_start] )]
identifier[n_epoch] = identifier[int] ( identifier[sum_geom] ( identifier[cycle_len] keyword[if] identifier[cycle_len] keyword[else] literal[int] , identifier[cycle_mult] , identifier[n_cycle] ))
keyword[return] identifier[fit] ( identifier[model] , identifier[data] , identifier[n_epoch] , identifier[layer_opt] . identifier[opt] , identifier[self] . identifier[crit] ,
identifier[metrics] = identifier[metrics] , identifier[callbacks] = identifier[callbacks] , identifier[reg_fn] = identifier[self] . identifier[reg_fn] , identifier[clip] = identifier[self] . identifier[clip] , identifier[fp16] = identifier[self] . identifier[fp16] ,
identifier[swa_model] = identifier[self] . identifier[swa_model] keyword[if] identifier[use_swa] keyword[else] keyword[None] , identifier[swa_start] = identifier[swa_start] ,
identifier[swa_eval_freq] = identifier[swa_eval_freq] ,** identifier[kwargs] ) | def fit_gen(self, model, data, layer_opt, n_cycle, cycle_len=None, cycle_mult=1, cycle_save_name=None, best_save_name=None, use_clr=None, use_clr_beta=None, metrics=None, callbacks=None, use_wd_sched=False, norm_wds=False, wds_sched_mult=None, use_swa=False, swa_start=1, swa_eval_freq=5, **kwargs):
"""Method does some preparation before finally delegating to the 'fit' method for
fitting the model. Namely, if cycle_len is defined, it adds a 'Cosine Annealing'
scheduler for varying the learning rate across iterations.
Method also computes the total number of epochs to fit based on provided 'cycle_len',
'cycle_mult', and 'n_cycle' parameters.
Args:
model (Learner): Any neural architecture for solving a supported problem.
Eg. ResNet-34, RNN_Learner etc.
data (ModelData): An instance of ModelData.
layer_opt (LayerOptimizer): An instance of the LayerOptimizer class
n_cycle (int): number of cycles
cycle_len (int): number of epochs before lr is reset to the initial value.
E.g if cycle_len = 3, then the lr is varied between a maximum
and minimum value over 3 epochs.
cycle_mult (int): additional parameter for influencing how the lr resets over
the cycles. For an intuitive explanation, please see
https://github.com/fastai/fastai/blob/master/courses/dl1/lesson1.ipynb
cycle_save_name (str): use to save the weights at end of each cycle (requires
use_clr, use_clr_beta or cycle_len arg)
best_save_name (str): use to save weights of best model during training.
metrics (function): some function for evaluating a desired metric. Eg. accuracy.
callbacks (list(Callback)): callbacks to apply during the training.
use_wd_sched (bool, optional): set to True to enable weight regularization using
the technique mentioned in https://arxiv.org/abs/1711.05101. When this is True
alone (see below), the regularization is detached from gradient update and
applied directly to the weights.
norm_wds (bool, optional): when this is set to True along with use_wd_sched, the
regularization factor is normalized with each training cycle.
wds_sched_mult (function, optional): when this is provided along with use_wd_sched
as True, the value computed by this function is multiplied with the regularization
strength. This function is passed the WeightDecaySchedule object. And example
function that can be passed is:
f = lambda x: np.array(x.layer_opt.lrs) / x.init_lrs
use_swa (bool, optional): when this is set to True, it will enable the use of
Stochastic Weight Averaging (https://arxiv.org/abs/1803.05407). The learner will
include an additional model (in the swa_model attribute) for keeping track of the
average weights as described in the paper. All testing of this technique so far has
been in image classification, so use in other contexts is not guaranteed to work.
swa_start (int, optional): if use_swa is set to True, then this determines the epoch
to start keeping track of the average weights. It is 1-indexed per the paper's
conventions.
swa_eval_freq (int, optional): if use_swa is set to True, this determines the frequency
at which to evaluate the performance of the swa_model. This evaluation can be costly
for models using BatchNorm (requiring a full pass through the data), which is why the
default is not to evaluate after each epoch.
Returns:
None
"""
if cycle_save_name:
assert use_clr or use_clr_beta or cycle_len, 'cycle_save_name argument requires either of the following arguments use_clr, use_clr_beta, cycle_len' # depends on [control=['if'], data=[]]
if callbacks is None:
callbacks = [] # depends on [control=['if'], data=['callbacks']]
if metrics is None:
metrics = self.metrics # depends on [control=['if'], data=['metrics']]
if use_wd_sched:
# This needs to come before CosAnneal() because we need to read the initial learning rate from
# layer_opt.lrs - but CosAnneal() alters the layer_opt.lrs value initially (divides by 100)
if np.sum(layer_opt.wds) == 0:
print('fit() warning: use_wd_sched is set to True, but weight decay(s) passed are 0. Use wds to pass weight decay values.') # depends on [control=['if'], data=[]]
batch_per_epoch = len(data.trn_dl)
cl = cycle_len if cycle_len else 1
self.wd_sched = WeightDecaySchedule(layer_opt, batch_per_epoch, cl, cycle_mult, n_cycle, norm_wds, wds_sched_mult)
callbacks += [self.wd_sched] # depends on [control=['if'], data=[]]
if use_clr is not None:
(clr_div, cut_div) = use_clr[:2]
moms = use_clr[2:] if len(use_clr) > 2 else None
cycle_end = self.get_cycle_end(cycle_save_name)
assert cycle_len, 'use_clr requires cycle_len arg'
self.sched = CircularLR(layer_opt, len(data.trn_dl) * cycle_len, on_cycle_end=cycle_end, div=clr_div, cut_div=cut_div, momentums=moms) # depends on [control=['if'], data=['use_clr']]
elif use_clr_beta is not None:
(div, pct) = use_clr_beta[:2]
moms = use_clr_beta[2:] if len(use_clr_beta) > 3 else None
cycle_end = self.get_cycle_end(cycle_save_name)
assert cycle_len, 'use_clr_beta requires cycle_len arg'
self.sched = CircularLR_beta(layer_opt, len(data.trn_dl) * cycle_len, on_cycle_end=cycle_end, div=div, pct=pct, momentums=moms) # depends on [control=['if'], data=['use_clr_beta']]
elif cycle_len:
cycle_end = self.get_cycle_end(cycle_save_name)
cycle_batches = len(data.trn_dl) * cycle_len
self.sched = CosAnneal(layer_opt, cycle_batches, on_cycle_end=cycle_end, cycle_mult=cycle_mult) # depends on [control=['if'], data=[]]
elif not self.sched:
self.sched = LossRecorder(layer_opt) # depends on [control=['if'], data=[]]
callbacks += [self.sched]
if best_save_name is not None:
callbacks += [SaveBestModel(self, layer_opt, metrics, best_save_name)] # depends on [control=['if'], data=['best_save_name']]
if use_swa:
# make a copy of the model to track average weights
self.swa_model = copy.deepcopy(model)
callbacks += [SWA(model, self.swa_model, swa_start)] # depends on [control=['if'], data=[]]
n_epoch = int(sum_geom(cycle_len if cycle_len else 1, cycle_mult, n_cycle))
return fit(model, data, n_epoch, layer_opt.opt, self.crit, metrics=metrics, callbacks=callbacks, reg_fn=self.reg_fn, clip=self.clip, fp16=self.fp16, swa_model=self.swa_model if use_swa else None, swa_start=swa_start, swa_eval_freq=swa_eval_freq, **kwargs) |
def exception_periods(self, range_start=datetime.date.min, range_end=datetime.date.max):
"""Returns a list of Period tuples for each period represented in an <exception>
that falls between range_start and range_end."""
periods = []
for exception_date, exception_times in self.exceptions.items():
if exception_date >= range_start and exception_date <= range_end:
for exception_time in exception_times:
periods.append(
Period(
self.timezone.localize(datetime.datetime.combine(exception_date, exception_time.start)),
self.timezone.localize(datetime.datetime.combine(exception_date, exception_time.end))
)
)
periods.sort()
return periods | def function[exception_periods, parameter[self, range_start, range_end]]:
constant[Returns a list of Period tuples for each period represented in an <exception>
that falls between range_start and range_end.]
variable[periods] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff6f010>, <ast.Name object at 0x7da1aff6e2c0>]]] in starred[call[name[self].exceptions.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1aff6dd80> begin[:]
for taget[name[exception_time]] in starred[name[exception_times]] begin[:]
call[name[periods].append, parameter[call[name[Period], parameter[call[name[self].timezone.localize, parameter[call[name[datetime].datetime.combine, parameter[name[exception_date], name[exception_time].start]]]], call[name[self].timezone.localize, parameter[call[name[datetime].datetime.combine, parameter[name[exception_date], name[exception_time].end]]]]]]]]
call[name[periods].sort, parameter[]]
return[name[periods]] | keyword[def] identifier[exception_periods] ( identifier[self] , identifier[range_start] = identifier[datetime] . identifier[date] . identifier[min] , identifier[range_end] = identifier[datetime] . identifier[date] . identifier[max] ):
literal[string]
identifier[periods] =[]
keyword[for] identifier[exception_date] , identifier[exception_times] keyword[in] identifier[self] . identifier[exceptions] . identifier[items] ():
keyword[if] identifier[exception_date] >= identifier[range_start] keyword[and] identifier[exception_date] <= identifier[range_end] :
keyword[for] identifier[exception_time] keyword[in] identifier[exception_times] :
identifier[periods] . identifier[append] (
identifier[Period] (
identifier[self] . identifier[timezone] . identifier[localize] ( identifier[datetime] . identifier[datetime] . identifier[combine] ( identifier[exception_date] , identifier[exception_time] . identifier[start] )),
identifier[self] . identifier[timezone] . identifier[localize] ( identifier[datetime] . identifier[datetime] . identifier[combine] ( identifier[exception_date] , identifier[exception_time] . identifier[end] ))
)
)
identifier[periods] . identifier[sort] ()
keyword[return] identifier[periods] | def exception_periods(self, range_start=datetime.date.min, range_end=datetime.date.max):
"""Returns a list of Period tuples for each period represented in an <exception>
that falls between range_start and range_end."""
periods = []
for (exception_date, exception_times) in self.exceptions.items():
if exception_date >= range_start and exception_date <= range_end:
for exception_time in exception_times:
periods.append(Period(self.timezone.localize(datetime.datetime.combine(exception_date, exception_time.start)), self.timezone.localize(datetime.datetime.combine(exception_date, exception_time.end)))) # depends on [control=['for'], data=['exception_time']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
periods.sort()
return periods |
def tensor_up(pauli_sum: Union[PauliSum, PauliTerm], qubits: List[int]):
"""
Takes a PauliSum object along with a list of
qubits and returns a matrix corresponding the tensor representation of the
object.
This is the same as :py:func:`lifted_pauli`. Nick R originally wrote this functionality
and really likes the name ``tensor_up``. Who can blame him?
:param pauli_sum: Pauli representation of an operator
:param qubits: list of qubits in the order they will be represented in the resultant matrix.
:returns: matrix representation of the pauli_sum operator
"""
return lifted_pauli(pauli_sum=pauli_sum, qubits=qubits) | def function[tensor_up, parameter[pauli_sum, qubits]]:
constant[
Takes a PauliSum object along with a list of
qubits and returns a matrix corresponding the tensor representation of the
object.
This is the same as :py:func:`lifted_pauli`. Nick R originally wrote this functionality
and really likes the name ``tensor_up``. Who can blame him?
:param pauli_sum: Pauli representation of an operator
:param qubits: list of qubits in the order they will be represented in the resultant matrix.
:returns: matrix representation of the pauli_sum operator
]
return[call[name[lifted_pauli], parameter[]]] | keyword[def] identifier[tensor_up] ( identifier[pauli_sum] : identifier[Union] [ identifier[PauliSum] , identifier[PauliTerm] ], identifier[qubits] : identifier[List] [ identifier[int] ]):
literal[string]
keyword[return] identifier[lifted_pauli] ( identifier[pauli_sum] = identifier[pauli_sum] , identifier[qubits] = identifier[qubits] ) | def tensor_up(pauli_sum: Union[PauliSum, PauliTerm], qubits: List[int]):
"""
Takes a PauliSum object along with a list of
qubits and returns a matrix corresponding the tensor representation of the
object.
This is the same as :py:func:`lifted_pauli`. Nick R originally wrote this functionality
and really likes the name ``tensor_up``. Who can blame him?
:param pauli_sum: Pauli representation of an operator
:param qubits: list of qubits in the order they will be represented in the resultant matrix.
:returns: matrix representation of the pauli_sum operator
"""
return lifted_pauli(pauli_sum=pauli_sum, qubits=qubits) |
def initialize_smart_disassemble(self, data, start_address=0):
"""
Set the binary buffer to disassemble with other related information
ready for an instruction by instruction disassembly session.
"""
_opcodes.initialize_smart_disassemble(
self._ptr, data, start_address) | def function[initialize_smart_disassemble, parameter[self, data, start_address]]:
constant[
Set the binary buffer to disassemble with other related information
ready for an instruction by instruction disassembly session.
]
call[name[_opcodes].initialize_smart_disassemble, parameter[name[self]._ptr, name[data], name[start_address]]] | keyword[def] identifier[initialize_smart_disassemble] ( identifier[self] , identifier[data] , identifier[start_address] = literal[int] ):
literal[string]
identifier[_opcodes] . identifier[initialize_smart_disassemble] (
identifier[self] . identifier[_ptr] , identifier[data] , identifier[start_address] ) | def initialize_smart_disassemble(self, data, start_address=0):
"""
Set the binary buffer to disassemble with other related information
ready for an instruction by instruction disassembly session.
"""
_opcodes.initialize_smart_disassemble(self._ptr, data, start_address) |
def _start_enqueue_thread(self):
""" Internal method to start the enqueue thread which adds the events in an internal queue. """
self._enqueueThreadSignal.acquire()
self._enqueueThread = Thread(target=self._enqueue_function)
self._enqueueThread.daemon = True
self._enqueueThread.start()
self._enqueueThreadSignal.wait()
self._enqueueThreadSignal.release() | def function[_start_enqueue_thread, parameter[self]]:
constant[ Internal method to start the enqueue thread which adds the events in an internal queue. ]
call[name[self]._enqueueThreadSignal.acquire, parameter[]]
name[self]._enqueueThread assign[=] call[name[Thread], parameter[]]
name[self]._enqueueThread.daemon assign[=] constant[True]
call[name[self]._enqueueThread.start, parameter[]]
call[name[self]._enqueueThreadSignal.wait, parameter[]]
call[name[self]._enqueueThreadSignal.release, parameter[]] | keyword[def] identifier[_start_enqueue_thread] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_enqueueThreadSignal] . identifier[acquire] ()
identifier[self] . identifier[_enqueueThread] = identifier[Thread] ( identifier[target] = identifier[self] . identifier[_enqueue_function] )
identifier[self] . identifier[_enqueueThread] . identifier[daemon] = keyword[True]
identifier[self] . identifier[_enqueueThread] . identifier[start] ()
identifier[self] . identifier[_enqueueThreadSignal] . identifier[wait] ()
identifier[self] . identifier[_enqueueThreadSignal] . identifier[release] () | def _start_enqueue_thread(self):
""" Internal method to start the enqueue thread which adds the events in an internal queue. """
self._enqueueThreadSignal.acquire()
self._enqueueThread = Thread(target=self._enqueue_function)
self._enqueueThread.daemon = True
self._enqueueThread.start()
self._enqueueThreadSignal.wait()
self._enqueueThreadSignal.release() |
def FromReadings(cls, uuid, readings, root_key=AuthProvider.NoKey, signer=None,
report_id=IOTileReading.InvalidReadingID, selector=0xFFFF, streamer=0, sent_timestamp=0):
"""Generate an instance of the report format from a list of readings and a uuid.
The signed list report is created using the passed readings and signed using the specified method
and AuthProvider. If no auth provider is specified, the report is signed using the default authorization
chain.
Args:
uuid (int): The uuid of the deviec that this report came from
readings (list): A list of IOTileReading objects containing the data in the report
root_key (int): The key that should be used to sign the report (must be supported
by an auth_provider)
signer (AuthProvider): An optional preconfigured AuthProvider that should be used to sign this
report. If no AuthProvider is provided, the default ChainedAuthProvider is used.
report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID.
Note that you can specify anything you want for the report id but for actual IOTile devices
the report id will always be greater than the id of all of the readings contained in the report
since devices generate ids sequentially.
selector (int): The streamer selector of this report. This can be anything but if the report came from
a device, it would correspond with the query the device used to pick readings to go into the report.
streamer (int): The streamer id that this reading was sent from.
sent_timestamp (int): The device's uptime that sent this report.
"""
lowest_id = IOTileReading.InvalidReadingID
highest_id = IOTileReading.InvalidReadingID
report_len = 20 + 16*len(readings) + 24
len_low = report_len & 0xFF
len_high = report_len >> 8
unique_readings = [x.reading_id for x in readings if x.reading_id != IOTileReading.InvalidReadingID]
if len(unique_readings) > 0:
lowest_id = min(unique_readings)
highest_id = max(unique_readings)
header = struct.pack("<BBHLLLBBH", cls.ReportType, len_low, len_high, uuid, report_id,
sent_timestamp, root_key, streamer, selector)
header = bytearray(header)
packed_readings = bytearray()
for reading in readings:
packed_reading = struct.pack("<HHLLL", reading.stream, 0, reading.reading_id,
reading.raw_time, reading.value)
packed_readings += bytearray(packed_reading)
footer_stats = struct.pack("<LL", lowest_id, highest_id)
if signer is None:
signer = ChainedAuthProvider()
# If we are supposed to encrypt this report, do the encryption
if root_key != signer.NoKey:
enc_data = packed_readings
try:
result = signer.encrypt_report(uuid, root_key, enc_data, report_id=report_id,
sent_timestamp=sent_timestamp)
except NotFoundError:
raise ExternalError("Could not encrypt report because no AuthProvider supported "
"the requested encryption method for the requested device",
device_id=uuid, root_key=root_key)
signed_data = header + result['data'] + footer_stats
else:
signed_data = header + packed_readings + footer_stats
try:
signature = signer.sign_report(uuid, root_key, signed_data, report_id=report_id,
sent_timestamp=sent_timestamp)
except NotFoundError:
raise ExternalError("Could not sign report because no AuthProvider supported the requested "
"signature method for the requested device", device_id=uuid, root_key=root_key)
footer = struct.pack("16s", bytes(signature['signature'][:16]))
footer = bytearray(footer)
data = signed_data + footer
return SignedListReport(data) | def function[FromReadings, parameter[cls, uuid, readings, root_key, signer, report_id, selector, streamer, sent_timestamp]]:
constant[Generate an instance of the report format from a list of readings and a uuid.
The signed list report is created using the passed readings and signed using the specified method
and AuthProvider. If no auth provider is specified, the report is signed using the default authorization
chain.
Args:
uuid (int): The uuid of the deviec that this report came from
readings (list): A list of IOTileReading objects containing the data in the report
root_key (int): The key that should be used to sign the report (must be supported
by an auth_provider)
signer (AuthProvider): An optional preconfigured AuthProvider that should be used to sign this
report. If no AuthProvider is provided, the default ChainedAuthProvider is used.
report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID.
Note that you can specify anything you want for the report id but for actual IOTile devices
the report id will always be greater than the id of all of the readings contained in the report
since devices generate ids sequentially.
selector (int): The streamer selector of this report. This can be anything but if the report came from
a device, it would correspond with the query the device used to pick readings to go into the report.
streamer (int): The streamer id that this reading was sent from.
sent_timestamp (int): The device's uptime that sent this report.
]
variable[lowest_id] assign[=] name[IOTileReading].InvalidReadingID
variable[highest_id] assign[=] name[IOTileReading].InvalidReadingID
variable[report_len] assign[=] binary_operation[binary_operation[constant[20] + binary_operation[constant[16] * call[name[len], parameter[name[readings]]]]] + constant[24]]
variable[len_low] assign[=] binary_operation[name[report_len] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]
variable[len_high] assign[=] binary_operation[name[report_len] <ast.RShift object at 0x7da2590d6a40> constant[8]]
variable[unique_readings] assign[=] <ast.ListComp object at 0x7da20e956a10>
if compare[call[name[len], parameter[name[unique_readings]]] greater[>] constant[0]] begin[:]
variable[lowest_id] assign[=] call[name[min], parameter[name[unique_readings]]]
variable[highest_id] assign[=] call[name[max], parameter[name[unique_readings]]]
variable[header] assign[=] call[name[struct].pack, parameter[constant[<BBHLLLBBH], name[cls].ReportType, name[len_low], name[len_high], name[uuid], name[report_id], name[sent_timestamp], name[root_key], name[streamer], name[selector]]]
variable[header] assign[=] call[name[bytearray], parameter[name[header]]]
variable[packed_readings] assign[=] call[name[bytearray], parameter[]]
for taget[name[reading]] in starred[name[readings]] begin[:]
variable[packed_reading] assign[=] call[name[struct].pack, parameter[constant[<HHLLL], name[reading].stream, constant[0], name[reading].reading_id, name[reading].raw_time, name[reading].value]]
<ast.AugAssign object at 0x7da20e9563b0>
variable[footer_stats] assign[=] call[name[struct].pack, parameter[constant[<LL], name[lowest_id], name[highest_id]]]
if compare[name[signer] is constant[None]] begin[:]
variable[signer] assign[=] call[name[ChainedAuthProvider], parameter[]]
if compare[name[root_key] not_equal[!=] name[signer].NoKey] begin[:]
variable[enc_data] assign[=] name[packed_readings]
<ast.Try object at 0x7da20e956980>
variable[signed_data] assign[=] binary_operation[binary_operation[name[header] + call[name[result]][constant[data]]] + name[footer_stats]]
<ast.Try object at 0x7da20e956500>
variable[footer] assign[=] call[name[struct].pack, parameter[constant[16s], call[name[bytes], parameter[call[call[name[signature]][constant[signature]]][<ast.Slice object at 0x7da18fe92c20>]]]]]
variable[footer] assign[=] call[name[bytearray], parameter[name[footer]]]
variable[data] assign[=] binary_operation[name[signed_data] + name[footer]]
return[call[name[SignedListReport], parameter[name[data]]]] | keyword[def] identifier[FromReadings] ( identifier[cls] , identifier[uuid] , identifier[readings] , identifier[root_key] = identifier[AuthProvider] . identifier[NoKey] , identifier[signer] = keyword[None] ,
identifier[report_id] = identifier[IOTileReading] . identifier[InvalidReadingID] , identifier[selector] = literal[int] , identifier[streamer] = literal[int] , identifier[sent_timestamp] = literal[int] ):
literal[string]
identifier[lowest_id] = identifier[IOTileReading] . identifier[InvalidReadingID]
identifier[highest_id] = identifier[IOTileReading] . identifier[InvalidReadingID]
identifier[report_len] = literal[int] + literal[int] * identifier[len] ( identifier[readings] )+ literal[int]
identifier[len_low] = identifier[report_len] & literal[int]
identifier[len_high] = identifier[report_len] >> literal[int]
identifier[unique_readings] =[ identifier[x] . identifier[reading_id] keyword[for] identifier[x] keyword[in] identifier[readings] keyword[if] identifier[x] . identifier[reading_id] != identifier[IOTileReading] . identifier[InvalidReadingID] ]
keyword[if] identifier[len] ( identifier[unique_readings] )> literal[int] :
identifier[lowest_id] = identifier[min] ( identifier[unique_readings] )
identifier[highest_id] = identifier[max] ( identifier[unique_readings] )
identifier[header] = identifier[struct] . identifier[pack] ( literal[string] , identifier[cls] . identifier[ReportType] , identifier[len_low] , identifier[len_high] , identifier[uuid] , identifier[report_id] ,
identifier[sent_timestamp] , identifier[root_key] , identifier[streamer] , identifier[selector] )
identifier[header] = identifier[bytearray] ( identifier[header] )
identifier[packed_readings] = identifier[bytearray] ()
keyword[for] identifier[reading] keyword[in] identifier[readings] :
identifier[packed_reading] = identifier[struct] . identifier[pack] ( literal[string] , identifier[reading] . identifier[stream] , literal[int] , identifier[reading] . identifier[reading_id] ,
identifier[reading] . identifier[raw_time] , identifier[reading] . identifier[value] )
identifier[packed_readings] += identifier[bytearray] ( identifier[packed_reading] )
identifier[footer_stats] = identifier[struct] . identifier[pack] ( literal[string] , identifier[lowest_id] , identifier[highest_id] )
keyword[if] identifier[signer] keyword[is] keyword[None] :
identifier[signer] = identifier[ChainedAuthProvider] ()
keyword[if] identifier[root_key] != identifier[signer] . identifier[NoKey] :
identifier[enc_data] = identifier[packed_readings]
keyword[try] :
identifier[result] = identifier[signer] . identifier[encrypt_report] ( identifier[uuid] , identifier[root_key] , identifier[enc_data] , identifier[report_id] = identifier[report_id] ,
identifier[sent_timestamp] = identifier[sent_timestamp] )
keyword[except] identifier[NotFoundError] :
keyword[raise] identifier[ExternalError] ( literal[string]
literal[string] ,
identifier[device_id] = identifier[uuid] , identifier[root_key] = identifier[root_key] )
identifier[signed_data] = identifier[header] + identifier[result] [ literal[string] ]+ identifier[footer_stats]
keyword[else] :
identifier[signed_data] = identifier[header] + identifier[packed_readings] + identifier[footer_stats]
keyword[try] :
identifier[signature] = identifier[signer] . identifier[sign_report] ( identifier[uuid] , identifier[root_key] , identifier[signed_data] , identifier[report_id] = identifier[report_id] ,
identifier[sent_timestamp] = identifier[sent_timestamp] )
keyword[except] identifier[NotFoundError] :
keyword[raise] identifier[ExternalError] ( literal[string]
literal[string] , identifier[device_id] = identifier[uuid] , identifier[root_key] = identifier[root_key] )
identifier[footer] = identifier[struct] . identifier[pack] ( literal[string] , identifier[bytes] ( identifier[signature] [ literal[string] ][: literal[int] ]))
identifier[footer] = identifier[bytearray] ( identifier[footer] )
identifier[data] = identifier[signed_data] + identifier[footer]
keyword[return] identifier[SignedListReport] ( identifier[data] ) | def FromReadings(cls, uuid, readings, root_key=AuthProvider.NoKey, signer=None, report_id=IOTileReading.InvalidReadingID, selector=65535, streamer=0, sent_timestamp=0):
"""Generate an instance of the report format from a list of readings and a uuid.
The signed list report is created using the passed readings and signed using the specified method
and AuthProvider. If no auth provider is specified, the report is signed using the default authorization
chain.
Args:
uuid (int): The uuid of the deviec that this report came from
readings (list): A list of IOTileReading objects containing the data in the report
root_key (int): The key that should be used to sign the report (must be supported
by an auth_provider)
signer (AuthProvider): An optional preconfigured AuthProvider that should be used to sign this
report. If no AuthProvider is provided, the default ChainedAuthProvider is used.
report_id (int): The id of the report. If not provided it defaults to IOTileReading.InvalidReadingID.
Note that you can specify anything you want for the report id but for actual IOTile devices
the report id will always be greater than the id of all of the readings contained in the report
since devices generate ids sequentially.
selector (int): The streamer selector of this report. This can be anything but if the report came from
a device, it would correspond with the query the device used to pick readings to go into the report.
streamer (int): The streamer id that this reading was sent from.
sent_timestamp (int): The device's uptime that sent this report.
"""
lowest_id = IOTileReading.InvalidReadingID
highest_id = IOTileReading.InvalidReadingID
report_len = 20 + 16 * len(readings) + 24
len_low = report_len & 255
len_high = report_len >> 8
unique_readings = [x.reading_id for x in readings if x.reading_id != IOTileReading.InvalidReadingID]
if len(unique_readings) > 0:
lowest_id = min(unique_readings)
highest_id = max(unique_readings) # depends on [control=['if'], data=[]]
header = struct.pack('<BBHLLLBBH', cls.ReportType, len_low, len_high, uuid, report_id, sent_timestamp, root_key, streamer, selector)
header = bytearray(header)
packed_readings = bytearray()
for reading in readings:
packed_reading = struct.pack('<HHLLL', reading.stream, 0, reading.reading_id, reading.raw_time, reading.value)
packed_readings += bytearray(packed_reading) # depends on [control=['for'], data=['reading']]
footer_stats = struct.pack('<LL', lowest_id, highest_id)
if signer is None:
signer = ChainedAuthProvider() # depends on [control=['if'], data=['signer']]
# If we are supposed to encrypt this report, do the encryption
if root_key != signer.NoKey:
enc_data = packed_readings
try:
result = signer.encrypt_report(uuid, root_key, enc_data, report_id=report_id, sent_timestamp=sent_timestamp) # depends on [control=['try'], data=[]]
except NotFoundError:
raise ExternalError('Could not encrypt report because no AuthProvider supported the requested encryption method for the requested device', device_id=uuid, root_key=root_key) # depends on [control=['except'], data=[]]
signed_data = header + result['data'] + footer_stats # depends on [control=['if'], data=['root_key']]
else:
signed_data = header + packed_readings + footer_stats
try:
signature = signer.sign_report(uuid, root_key, signed_data, report_id=report_id, sent_timestamp=sent_timestamp) # depends on [control=['try'], data=[]]
except NotFoundError:
raise ExternalError('Could not sign report because no AuthProvider supported the requested signature method for the requested device', device_id=uuid, root_key=root_key) # depends on [control=['except'], data=[]]
footer = struct.pack('16s', bytes(signature['signature'][:16]))
footer = bytearray(footer)
data = signed_data + footer
return SignedListReport(data) |
def order_by(self, *orderings: str) -> "QuerySet":
    """
    Accept args to filter by in format like this:
    .. code-block:: python3
        .order_by('name', '-tournament__name')
    Supports ordering by related models too.
    """
    cloned = self._clone()
    resolved = []
    for spec in orderings:
        # A leading "-" selects descending order for that field.
        if spec[0] == "-":
            field_name, direction = spec[1:], Order.desc
        else:
            field_name, direction = spec, Order.asc
        # The field (or its root, for related lookups like a__b) must be a
        # model field or a queryset annotation.
        root = field_name.split("__")[0]
        is_known = root in self.model._meta.fields or field_name in self._annotations
        if not is_known:
            raise FieldError(
                "Unknown field {} for model {}".format(field_name, self.model.__name__)
            )
        resolved.append((field_name, direction))
    cloned._orderings = resolved
    return cloned
constant[
Accept args to filter by in format like this:
.. code-block:: python3
.order_by('name', '-tournament__name')
Supports ordering by related models too.
]
variable[queryset] assign[=] call[name[self]._clone, parameter[]]
variable[new_ordering] assign[=] list[[]]
for taget[name[ordering]] in starred[name[orderings]] begin[:]
variable[order_type] assign[=] name[Order].asc
if compare[call[name[ordering]][constant[0]] equal[==] constant[-]] begin[:]
variable[field_name] assign[=] call[name[ordering]][<ast.Slice object at 0x7da20c993310>]
variable[order_type] assign[=] name[Order].desc
if <ast.UnaryOp object at 0x7da20c9923e0> begin[:]
<ast.Raise object at 0x7da20c9915a0>
call[name[new_ordering].append, parameter[tuple[[<ast.Name object at 0x7da20c9926b0>, <ast.Name object at 0x7da20c993820>]]]]
name[queryset]._orderings assign[=] name[new_ordering]
return[name[queryset]] | keyword[def] identifier[order_by] ( identifier[self] ,* identifier[orderings] : identifier[str] )-> literal[string] :
literal[string]
identifier[queryset] = identifier[self] . identifier[_clone] ()
identifier[new_ordering] =[]
keyword[for] identifier[ordering] keyword[in] identifier[orderings] :
identifier[order_type] = identifier[Order] . identifier[asc]
keyword[if] identifier[ordering] [ literal[int] ]== literal[string] :
identifier[field_name] = identifier[ordering] [ literal[int] :]
identifier[order_type] = identifier[Order] . identifier[desc]
keyword[else] :
identifier[field_name] = identifier[ordering]
keyword[if] keyword[not] (
identifier[field_name] . identifier[split] ( literal[string] )[ literal[int] ] keyword[in] identifier[self] . identifier[model] . identifier[_meta] . identifier[fields]
keyword[or] identifier[field_name] keyword[in] identifier[self] . identifier[_annotations]
):
keyword[raise] identifier[FieldError] (
literal[string] . identifier[format] ( identifier[field_name] , identifier[self] . identifier[model] . identifier[__name__] )
)
identifier[new_ordering] . identifier[append] (( identifier[field_name] , identifier[order_type] ))
identifier[queryset] . identifier[_orderings] = identifier[new_ordering]
keyword[return] identifier[queryset] | def order_by(self, *orderings: str) -> 'QuerySet':
"""
Accept args to filter by in format like this:
.. code-block:: python3
.order_by('name', '-tournament__name')
Supports ordering by related models too.
"""
queryset = self._clone()
new_ordering = []
for ordering in orderings:
order_type = Order.asc
if ordering[0] == '-':
field_name = ordering[1:]
order_type = Order.desc # depends on [control=['if'], data=[]]
else:
field_name = ordering
if not (field_name.split('__')[0] in self.model._meta.fields or field_name in self._annotations):
raise FieldError('Unknown field {} for model {}'.format(field_name, self.model.__name__)) # depends on [control=['if'], data=[]]
new_ordering.append((field_name, order_type)) # depends on [control=['for'], data=['ordering']]
queryset._orderings = new_ordering
return queryset |
def sim(self, src, tar):
    """Return the length similarity of two strings.
    Length similarity is the ratio of the length of the shorter string to
    the longer.
    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    Returns
    -------
    float
        Length similarity
    Examples
    --------
    >>> cmp = Length()
    >>> cmp.sim('cat', 'hat')
    1.0
    >>> cmp.sim('Niall', 'Neil')
    0.8
    >>> cmp.sim('aluminum', 'Catalan')
    0.875
    >>> cmp.sim('ATCG', 'TAGC')
    1.0
    """
    # Identical strings are maximally similar by definition.
    if src == tar:
        return 1.0
    # One empty and one non-empty string share no length at all.
    if not src or not tar:
        return 0.0
    src_len, tar_len = len(src), len(tar)
    return min(src_len, tar_len) / max(src_len, tar_len)
constant[Return the length similarity of two strings.
Length similarity is the ratio of the length of the shorter string to
the longer.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Length similarity
Examples
--------
>>> cmp = Length()
>>> cmp.sim('cat', 'hat')
1.0
>>> cmp.sim('Niall', 'Neil')
0.8
>>> cmp.sim('aluminum', 'Catalan')
0.875
>>> cmp.sim('ATCG', 'TAGC')
1.0
]
if compare[name[src] equal[==] name[tar]] begin[:]
return[constant[1.0]]
if <ast.BoolOp object at 0x7da2054a40a0> begin[:]
return[constant[0.0]]
return[<ast.IfExp object at 0x7da2054a7640>] | keyword[def] identifier[sim] ( identifier[self] , identifier[src] , identifier[tar] ):
literal[string]
keyword[if] identifier[src] == identifier[tar] :
keyword[return] literal[int]
keyword[if] keyword[not] identifier[src] keyword[or] keyword[not] identifier[tar] :
keyword[return] literal[int]
keyword[return] (
identifier[len] ( identifier[src] )/ identifier[len] ( identifier[tar] ) keyword[if] identifier[len] ( identifier[src] )< identifier[len] ( identifier[tar] ) keyword[else] identifier[len] ( identifier[tar] )/ identifier[len] ( identifier[src] )
) | def sim(self, src, tar):
"""Return the length similarity of two strings.
Length similarity is the ratio of the length of the shorter string to
the longer.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Length similarity
Examples
--------
>>> cmp = Length()
>>> cmp.sim('cat', 'hat')
1.0
>>> cmp.sim('Niall', 'Neil')
0.8
>>> cmp.sim('aluminum', 'Catalan')
0.875
>>> cmp.sim('ATCG', 'TAGC')
1.0
"""
if src == tar:
return 1.0 # depends on [control=['if'], data=[]]
if not src or not tar:
return 0.0 # depends on [control=['if'], data=[]]
return len(src) / len(tar) if len(src) < len(tar) else len(tar) / len(src) |
def top_level(url, fix_protocol=True):
    """Extract the top level domain from an URL."""
    # Resolve the public suffix (e.g. "co.uk") via the tld package.
    ext = tld.get_tld(url, fix_protocol=fix_protocol)
    # Keep the last two netloc labels, then trim everything after the suffix
    # and re-append it, yielding e.g. "example.co.uk".
    tail = '.'.join(urlparse(url).netloc.split('.')[-2:])
    return tail.split(ext)[0] + ext
constant[Extract the top level domain from an URL.]
variable[ext] assign[=] call[name[tld].get_tld, parameter[name[url]]]
variable[toplevel] assign[=] binary_operation[call[call[call[constant[.].join, parameter[call[call[call[name[urlparse], parameter[name[url]]].netloc.split, parameter[constant[.]]]][<ast.Slice object at 0x7da1b1f25e70>]]].split, parameter[name[ext]]]][constant[0]] + name[ext]]
return[name[toplevel]] | keyword[def] identifier[top_level] ( identifier[url] , identifier[fix_protocol] = keyword[True] ):
literal[string]
identifier[ext] = identifier[tld] . identifier[get_tld] ( identifier[url] , identifier[fix_protocol] = identifier[fix_protocol] )
identifier[toplevel] = literal[string] . identifier[join] ( identifier[urlparse] ( identifier[url] ). identifier[netloc] . identifier[split] ( literal[string] )[- literal[int] :]). identifier[split] (
identifier[ext] )[ literal[int] ]+ identifier[ext]
keyword[return] identifier[toplevel] | def top_level(url, fix_protocol=True):
"""Extract the top level domain from an URL."""
ext = tld.get_tld(url, fix_protocol=fix_protocol)
toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(ext)[0] + ext
return toplevel |
def ingest_from_file(self, file_descriptor, ingestion_properties):
    """Ingest from local files.

    :param file_descriptor: a FileDescriptor (or a path-like value accepted by
        the FileDescriptor constructor) to be ingested.
    :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
    """
    if isinstance(file_descriptor, FileDescriptor):
        descriptor = file_descriptor
    else:
        descriptor = FileDescriptor(file_descriptor)
    try:
        self._ingest(descriptor.zipped_stream, descriptor.size, ingestion_properties, content_encoding="gzip")
    finally:
        # Always remove the temporary zipped files, even when ingestion
        # raises; previously a failed _ingest leaked them on disk.
        descriptor.delete_files()
constant[Ingest from local files.
:param file_descriptor: a FileDescriptor to be ingested.
:param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
]
if call[name[isinstance], parameter[name[file_descriptor], name[FileDescriptor]]] begin[:]
variable[descriptor] assign[=] name[file_descriptor]
call[name[self]._ingest, parameter[name[descriptor].zipped_stream, name[descriptor].size, name[ingestion_properties]]]
call[name[descriptor].delete_files, parameter[]] | keyword[def] identifier[ingest_from_file] ( identifier[self] , identifier[file_descriptor] , identifier[ingestion_properties] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[file_descriptor] , identifier[FileDescriptor] ):
identifier[descriptor] = identifier[file_descriptor]
keyword[else] :
identifier[descriptor] = identifier[FileDescriptor] ( identifier[file_descriptor] )
identifier[self] . identifier[_ingest] ( identifier[descriptor] . identifier[zipped_stream] , identifier[descriptor] . identifier[size] , identifier[ingestion_properties] , identifier[content_encoding] = literal[string] )
identifier[descriptor] . identifier[delete_files] () | def ingest_from_file(self, file_descriptor, ingestion_properties):
"""Ingest from local files.
:param file_descriptor: a FileDescriptor to be ingested.
:param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
"""
if isinstance(file_descriptor, FileDescriptor):
descriptor = file_descriptor # depends on [control=['if'], data=[]]
else:
descriptor = FileDescriptor(file_descriptor)
self._ingest(descriptor.zipped_stream, descriptor.size, ingestion_properties, content_encoding='gzip')
descriptor.delete_files() |
def get_function(rule, domain, normalize, **parameters):
    """
    Create a quadrature function and set default parameter values.
    Args:
        rule (str):
            Name of quadrature rule defined in ``QUAD_FUNCTIONS``.
        domain (Dist, numpy.ndarray):
            Defines ``lower`` and ``upper`` that is passed quadrature rule. If
            ``Dist``, ``domain`` is renamed to ``dist`` and also
            passed.
        normalize (bool):
            In the case of distributions, the abscissas and weights are not
            tailored to a distribution beyond matching the bounds. If True, the
            samples are normalized multiplying the weights with the density of
            the distribution evaluated at the abscissas and normalized
            afterwards to sum to one.
        parameters (:py:data:typing.Any):
            Redefining of the parameter defaults. Only add parameters that the
            quadrature rule expect.
    Returns:
        (:py:data:typing.Callable):
            Function that can be called only using argument ``order``.
    """
    from ...distributions.baseclass import Dist
    if isinstance(domain, Dist):
        lower, upper = domain.range()
        parameters["dist"] = domain
    else:
        lower, upper = numpy.array(domain)
    parameters["lower"] = lower
    parameters["upper"] = upper

    quad_function = QUAD_FUNCTIONS[rule]
    # inspect.getargspec was deprecated and removed in Python 3.11, and it
    # raises on functions with keyword-only arguments; getfullargspec is the
    # drop-in replacement for discovering positional argument names.
    parameters_spec = inspect.getfullargspec(quad_function).args
    parameters_spec = {key: None for key in parameters_spec}
    # "order" is supplied by the caller of the returned function, not fixed here.
    del parameters_spec["order"]

    for key in parameters_spec:
        if key in parameters:
            parameters_spec[key] = parameters[key]

    def _quad_function(order, *args, **kws):
        """Implementation of quadrature function."""
        params = parameters_spec.copy()
        params.update(kws)
        abscissas, weights = quad_function(order, *args, **params)

        # normalize if prudent:
        if rule in UNORMALIZED_QUADRATURE_RULES and normalize:
            if isinstance(domain, Dist):
                if len(domain) == 1:
                    weights *= domain.pdf(abscissas).flatten()
                else:
                    weights *= domain.pdf(abscissas)
            weights /= numpy.sum(weights)

        return abscissas, weights

    return _quad_function
constant[
Create a quadrature function and set default parameter values.
Args:
rule (str):
Name of quadrature rule defined in ``QUAD_FUNCTIONS``.
domain (Dist, numpy.ndarray):
Defines ``lower`` and ``upper`` that is passed quadrature rule. If
``Dist``, ``domain`` is renamed to ``dist`` and also
passed.
normalize (bool):
In the case of distributions, the abscissas and weights are not
tailored to a distribution beyond matching the bounds. If True, the
samples are normalized multiplying the weights with the density of
the distribution evaluated at the abscissas and normalized
afterwards to sum to one.
parameters (:py:data:typing.Any):
Redefining of the parameter defaults. Only add parameters that the
quadrature rule expect.
Returns:
(:py:data:typing.Callable):
Function that can be called only using argument ``order``.
]
from relative_module[distributions.baseclass] import module[Dist]
if call[name[isinstance], parameter[name[domain], name[Dist]]] begin[:]
<ast.Tuple object at 0x7da207f027a0> assign[=] call[name[domain].range, parameter[]]
call[name[parameters]][constant[dist]] assign[=] name[domain]
call[name[parameters]][constant[lower]] assign[=] name[lower]
call[name[parameters]][constant[upper]] assign[=] name[upper]
variable[quad_function] assign[=] call[name[QUAD_FUNCTIONS]][name[rule]]
variable[parameters_spec] assign[=] call[call[name[inspect].getargspec, parameter[name[quad_function]]]][constant[0]]
variable[parameters_spec] assign[=] <ast.DictComp object at 0x7da207f02950>
<ast.Delete object at 0x7da207f03a60>
for taget[name[key]] in starred[name[parameters_spec]] begin[:]
if compare[name[key] in name[parameters]] begin[:]
call[name[parameters_spec]][name[key]] assign[=] call[name[parameters]][name[key]]
def function[_quad_function, parameter[order]]:
constant[Implementation of quadrature function.]
variable[params] assign[=] call[name[parameters_spec].copy, parameter[]]
call[name[params].update, parameter[name[kws]]]
<ast.Tuple object at 0x7da1b26ac460> assign[=] call[name[quad_function], parameter[name[order], <ast.Starred object at 0x7da1b26ae710>]]
if <ast.BoolOp object at 0x7da1b26ad030> begin[:]
if call[name[isinstance], parameter[name[domain], name[Dist]]] begin[:]
if compare[call[name[len], parameter[name[domain]]] equal[==] constant[1]] begin[:]
<ast.AugAssign object at 0x7da1b26af5e0>
<ast.AugAssign object at 0x7da1b26ae380>
return[tuple[[<ast.Name object at 0x7da1b26ac9d0>, <ast.Name object at 0x7da1b26ace20>]]]
return[name[_quad_function]] | keyword[def] identifier[get_function] ( identifier[rule] , identifier[domain] , identifier[normalize] ,** identifier[parameters] ):
literal[string]
keyword[from] ... identifier[distributions] . identifier[baseclass] keyword[import] identifier[Dist]
keyword[if] identifier[isinstance] ( identifier[domain] , identifier[Dist] ):
identifier[lower] , identifier[upper] = identifier[domain] . identifier[range] ()
identifier[parameters] [ literal[string] ]= identifier[domain]
keyword[else] :
identifier[lower] , identifier[upper] = identifier[numpy] . identifier[array] ( identifier[domain] )
identifier[parameters] [ literal[string] ]= identifier[lower]
identifier[parameters] [ literal[string] ]= identifier[upper]
identifier[quad_function] = identifier[QUAD_FUNCTIONS] [ identifier[rule] ]
identifier[parameters_spec] = identifier[inspect] . identifier[getargspec] ( identifier[quad_function] )[ literal[int] ]
identifier[parameters_spec] ={ identifier[key] : keyword[None] keyword[for] identifier[key] keyword[in] identifier[parameters_spec] }
keyword[del] identifier[parameters_spec] [ literal[string] ]
keyword[for] identifier[key] keyword[in] identifier[parameters_spec] :
keyword[if] identifier[key] keyword[in] identifier[parameters] :
identifier[parameters_spec] [ identifier[key] ]= identifier[parameters] [ identifier[key] ]
keyword[def] identifier[_quad_function] ( identifier[order] ,* identifier[args] ,** identifier[kws] ):
literal[string]
identifier[params] = identifier[parameters_spec] . identifier[copy] ()
identifier[params] . identifier[update] ( identifier[kws] )
identifier[abscissas] , identifier[weights] = identifier[quad_function] ( identifier[order] ,* identifier[args] ,** identifier[params] )
keyword[if] identifier[rule] keyword[in] identifier[UNORMALIZED_QUADRATURE_RULES] keyword[and] identifier[normalize] :
keyword[if] identifier[isinstance] ( identifier[domain] , identifier[Dist] ):
keyword[if] identifier[len] ( identifier[domain] )== literal[int] :
identifier[weights] *= identifier[domain] . identifier[pdf] ( identifier[abscissas] ). identifier[flatten] ()
keyword[else] :
identifier[weights] *= identifier[domain] . identifier[pdf] ( identifier[abscissas] )
identifier[weights] /= identifier[numpy] . identifier[sum] ( identifier[weights] )
keyword[return] identifier[abscissas] , identifier[weights]
keyword[return] identifier[_quad_function] | def get_function(rule, domain, normalize, **parameters):
"""
Create a quadrature function and set default parameter values.
Args:
rule (str):
Name of quadrature rule defined in ``QUAD_FUNCTIONS``.
domain (Dist, numpy.ndarray):
Defines ``lower`` and ``upper`` that is passed quadrature rule. If
``Dist``, ``domain`` is renamed to ``dist`` and also
passed.
normalize (bool):
In the case of distributions, the abscissas and weights are not
tailored to a distribution beyond matching the bounds. If True, the
samples are normalized multiplying the weights with the density of
the distribution evaluated at the abscissas and normalized
afterwards to sum to one.
parameters (:py:data:typing.Any):
Redefining of the parameter defaults. Only add parameters that the
quadrature rule expect.
Returns:
(:py:data:typing.Callable):
Function that can be called only using argument ``order``.
"""
from ...distributions.baseclass import Dist
if isinstance(domain, Dist):
(lower, upper) = domain.range()
parameters['dist'] = domain # depends on [control=['if'], data=[]]
else:
(lower, upper) = numpy.array(domain)
parameters['lower'] = lower
parameters['upper'] = upper
quad_function = QUAD_FUNCTIONS[rule]
parameters_spec = inspect.getargspec(quad_function)[0]
parameters_spec = {key: None for key in parameters_spec}
del parameters_spec['order']
for key in parameters_spec:
if key in parameters:
parameters_spec[key] = parameters[key] # depends on [control=['if'], data=['key', 'parameters']] # depends on [control=['for'], data=['key']]
def _quad_function(order, *args, **kws):
"""Implementation of quadrature function."""
params = parameters_spec.copy()
params.update(kws)
(abscissas, weights) = quad_function(order, *args, **params)
# normalize if prudent:
if rule in UNORMALIZED_QUADRATURE_RULES and normalize:
if isinstance(domain, Dist):
if len(domain) == 1:
weights *= domain.pdf(abscissas).flatten() # depends on [control=['if'], data=[]]
else:
weights *= domain.pdf(abscissas) # depends on [control=['if'], data=[]]
weights /= numpy.sum(weights) # depends on [control=['if'], data=[]]
return (abscissas, weights)
return _quad_function |
def load_post_webhook(request):
    """
    Webhook to insert/update a WordPress.com post on the local Django site.

    Call this after making changes to the post in WP Admin.
    The post is processed asynchronously so that a response can be returned to WordPress.com immediately.

    :param request: Should contain the WordPress post ID, named "ID", in POST form data
    :return: JsonResponse indicating the refresh is in progress.
    """
    try:
        wp_post_id = int(request.POST["ID"])
    except (KeyError, ValueError, TypeError):
        # Missing "ID" key or a non-numeric value -> 404. Catching these
        # specific exceptions (instead of a bare except) avoids masking
        # unrelated errors such as KeyboardInterrupt or attribute bugs.
        raise Http404("Post does not exist")

    # load this asynchronously so that the webhook gets a fast response
    load_post.after_response(wp_post_id)

    return JsonResponse({"status": "Refreshing wp_post_id: {}".format(wp_post_id)})
constant[
Webhook to insert/update a WordPress.com post on the local Django site.
Call this after making changes to the post in WP Admin.
The post is processed asynchronously so that a response can be returned to WordPress.com immediately.
:param request: Should contain the WordPress post ID, named "ID", in POST form data
:return: JsonResponse indicated the refresh is in progress.
]
<ast.Try object at 0x7da1b24ada80>
call[name[load_post].after_response, parameter[name[wp_post_id]]]
return[call[name[JsonResponse], parameter[dictionary[[<ast.Constant object at 0x7da1b24aeec0>], [<ast.Call object at 0x7da1b24ac100>]]]]] | keyword[def] identifier[load_post_webhook] ( identifier[request] ):
literal[string]
keyword[try] :
identifier[wp_post_id] = identifier[int] ( identifier[request] . identifier[POST] [ literal[string] ])
keyword[except] :
keyword[raise] identifier[Http404] ( literal[string] )
identifier[load_post] . identifier[after_response] ( identifier[wp_post_id] )
keyword[return] identifier[JsonResponse] ({ literal[string] : literal[string] . identifier[format] ( identifier[wp_post_id] )}) | def load_post_webhook(request):
"""
Webhook to insert/update a WordPress.com post on the local Django site.
Call this after making changes to the post in WP Admin.
The post is processed asynchronously so that a response can be returned to WordPress.com immediately.
:param request: Should contain the WordPress post ID, named "ID", in POST form data
:return: JsonResponse indicated the refresh is in progress.
"""
try:
wp_post_id = int(request.POST['ID']) # depends on [control=['try'], data=[]]
except:
raise Http404('Post does not exist') # depends on [control=['except'], data=[]]
# load this asynchronously so that the webhook gets a fast response
load_post.after_response(wp_post_id)
return JsonResponse({'status': 'Refreshing wp_post_id: {}'.format(wp_post_id)}) |
def add_default(self, ext, content_type):
    """
    Add a child ``<Default>`` element with attributes set to parameter
    values.
    """
    # Delegate to the private helper that builds and attaches the element.
    default_element = self._add_default(extension=ext, contentType=content_type)
    return default_element
constant[
Add a child ``<Default>`` element with attributes set to parameter
values.
]
return[call[name[self]._add_default, parameter[]]] | keyword[def] identifier[add_default] ( identifier[self] , identifier[ext] , identifier[content_type] ):
literal[string]
keyword[return] identifier[self] . identifier[_add_default] ( identifier[extension] = identifier[ext] , identifier[contentType] = identifier[content_type] ) | def add_default(self, ext, content_type):
"""
Add a child ``<Default>`` element with attributes set to parameter
values.
"""
return self._add_default(extension=ext, contentType=content_type) |
def make_table(self):
    """Make numpy array from timeseries data.

    Returns
    -------
    numpy.recarray
        One record per contact in ``self.timeseries`` holding the frame/time,
        the ligand atom id/name, the cutoff, and the matched protein
        atoms/residue identifiers.
    """
    dtype = [
        ("frame", float), ("time", float), ("ligand atom id", int),
        ("ligand atom name", "|U4"), ("cutoff", float),
        ("protein atom names", list), ("protein atom ids", list),
        ("resid", int), ("resname", "|U4"), ("segid", "|U8"),
    ]
    # len() gives the record count directly; the old
    # np.sum([1 for frame in ...]) built a throwaway list just to count.
    out = np.empty((len(self.timeseries),), dtype=dtype)
    for cursor, contact in enumerate(self.timeseries):
        out[cursor] = (
            contact.frame, contact.time, contact.ligandatomindex,
            contact.ligandatomname, contact.cutoff,
            contact.proteinatomname, contact.proteinatomindex,
            contact.resid, contact.resname, contact.segid,
        )
    return out.view(np.recarray)
constant[Make numpy array from timeseries data.]
variable[num_records] assign[=] call[name[np].sum, parameter[<ast.ListComp object at 0x7da20c993f70>]]
variable[dtype] assign[=] list[[<ast.Tuple object at 0x7da20c990940>, <ast.Tuple object at 0x7da20c990ac0>, <ast.Tuple object at 0x7da20c991bd0>, <ast.Tuple object at 0x7da20c990a90>, <ast.Tuple object at 0x7da20c992830>, <ast.Tuple object at 0x7da20c992c80>, <ast.Tuple object at 0x7da20c9928c0>, <ast.Tuple object at 0x7da20c991ab0>, <ast.Tuple object at 0x7da20c9931f0>, <ast.Tuple object at 0x7da20c990100>]]
variable[out] assign[=] call[name[np].empty, parameter[tuple[[<ast.Name object at 0x7da20c991120>]]]]
variable[cursor] assign[=] constant[0]
for taget[name[contact]] in starred[name[self].timeseries] begin[:]
call[name[out]][name[cursor]] assign[=] tuple[[<ast.Attribute object at 0x7da1b24ac430>, <ast.Attribute object at 0x7da1b24ae7a0>, <ast.Attribute object at 0x7da1b24ac1f0>, <ast.Attribute object at 0x7da1b24ac760>, <ast.Attribute object at 0x7da1b24af4f0>, <ast.Attribute object at 0x7da1b24ae200>, <ast.Attribute object at 0x7da1b24affa0>, <ast.Attribute object at 0x7da1b24aea70>, <ast.Attribute object at 0x7da1b24ac9d0>, <ast.Attribute object at 0x7da1b24acc70>]]
<ast.AugAssign object at 0x7da1b24aee60>
return[call[name[out].view, parameter[name[np].recarray]]] | keyword[def] identifier[make_table] ( identifier[self] ):
literal[string]
identifier[num_records] = identifier[np] . identifier[sum] ([ literal[int] keyword[for] identifier[frame] keyword[in] identifier[self] . identifier[timeseries] ])
identifier[dtype] =[( literal[string] , identifier[float] ),( literal[string] , identifier[float] ),( literal[string] , identifier[int] ),
( literal[string] , literal[string] ),( literal[string] , identifier[float] ),
( literal[string] , identifier[list] ),( literal[string] , identifier[list] ),
( literal[string] , identifier[int] ),( literal[string] , literal[string] ),( literal[string] , literal[string] )]
identifier[out] = identifier[np] . identifier[empty] (( identifier[num_records] ,), identifier[dtype] = identifier[dtype] )
identifier[cursor] = literal[int]
keyword[for] identifier[contact] keyword[in] identifier[self] . identifier[timeseries] :
identifier[out] [ identifier[cursor] ]=( identifier[contact] . identifier[frame] , identifier[contact] . identifier[time] , identifier[contact] . identifier[ligandatomindex] , identifier[contact] . identifier[ligandatomname] , identifier[contact] . identifier[cutoff] ,
identifier[contact] . identifier[proteinatomname] , identifier[contact] . identifier[proteinatomindex] , identifier[contact] . identifier[resid] , identifier[contact] . identifier[resname] , identifier[contact] . identifier[segid] )
identifier[cursor] += literal[int]
keyword[return] identifier[out] . identifier[view] ( identifier[np] . identifier[recarray] ) | def make_table(self):
"""Make numpy array from timeseries data."""
num_records = np.sum([1 for frame in self.timeseries])
dtype = [('frame', float), ('time', float), ('ligand atom id', int), ('ligand atom name', '|U4'), ('cutoff', float), ('protein atom names', list), ('protein atom ids', list), ('resid', int), ('resname', '|U4'), ('segid', '|U8')]
out = np.empty((num_records,), dtype=dtype)
cursor = 0
for contact in self.timeseries:
out[cursor] = (contact.frame, contact.time, contact.ligandatomindex, contact.ligandatomname, contact.cutoff, contact.proteinatomname, contact.proteinatomindex, contact.resid, contact.resname, contact.segid)
cursor += 1 # depends on [control=['for'], data=['contact']]
return out.view(np.recarray) |
def exception_handler(exc, context):
    """
    Build the response appropriate for a given exception.
    We handle the REST framework `APIException` as well as Django's
    built-in `Http404` and `PermissionDenied`. Any other exception
    yields `None`, which the framework turns into a 500 error.
    """
    if isinstance(exc, exceptions.APIException):
        headers = {}
        auth_header = getattr(exc, 'auth_header', None)
        if auth_header:
            headers['WWW-Authenticate'] = auth_header
        wait = getattr(exc, 'wait', None)
        if wait:
            headers['Retry-After'] = '%d' % wait
        if isinstance(exc.detail, (list, dict)):
            payload = exc.detail
        else:
            payload = {'message': exc.detail}
        set_rollback()
        return Response(payload, status=exc.status_code, headers=headers)
    if isinstance(exc, Http404):
        payload = {'message': six.text_type(_('Not found.'))}
        set_rollback()
        return Response(payload, status=status.HTTP_404_NOT_FOUND)
    if isinstance(exc, PermissionDenied):
        payload = {'message': six.text_type(_('Permission denied.'))}
        set_rollback()
        return Response(payload, status=status.HTTP_403_FORBIDDEN)
    # Unhandled exceptions fall through and will raise a 500 error.
    return None
constant[
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
]
if call[name[isinstance], parameter[name[exc], name[exceptions].APIException]] begin[:]
variable[headers] assign[=] dictionary[[], []]
if call[name[getattr], parameter[name[exc], constant[auth_header], constant[None]]] begin[:]
call[name[headers]][constant[WWW-Authenticate]] assign[=] name[exc].auth_header
if call[name[getattr], parameter[name[exc], constant[wait], constant[None]]] begin[:]
call[name[headers]][constant[Retry-After]] assign[=] binary_operation[constant[%d] <ast.Mod object at 0x7da2590d6920> name[exc].wait]
if call[name[isinstance], parameter[name[exc].detail, tuple[[<ast.Name object at 0x7da20e9621a0>, <ast.Name object at 0x7da20e9633a0>]]]] begin[:]
variable[data] assign[=] name[exc].detail
call[name[set_rollback], parameter[]]
return[call[name[Response], parameter[name[data]]]]
return[constant[None]] | keyword[def] identifier[exception_handler] ( identifier[exc] , identifier[context] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[exc] , identifier[exceptions] . identifier[APIException] ):
identifier[headers] ={}
keyword[if] identifier[getattr] ( identifier[exc] , literal[string] , keyword[None] ):
identifier[headers] [ literal[string] ]= identifier[exc] . identifier[auth_header]
keyword[if] identifier[getattr] ( identifier[exc] , literal[string] , keyword[None] ):
identifier[headers] [ literal[string] ]= literal[string] % identifier[exc] . identifier[wait]
keyword[if] identifier[isinstance] ( identifier[exc] . identifier[detail] ,( identifier[list] , identifier[dict] )):
identifier[data] = identifier[exc] . identifier[detail]
keyword[else] :
identifier[data] ={ literal[string] : identifier[exc] . identifier[detail] }
identifier[set_rollback] ()
keyword[return] identifier[Response] ( identifier[data] , identifier[status] = identifier[exc] . identifier[status_code] , identifier[headers] = identifier[headers] )
keyword[elif] identifier[isinstance] ( identifier[exc] , identifier[Http404] ):
identifier[msg] = identifier[_] ( literal[string] )
identifier[data] ={ literal[string] : identifier[six] . identifier[text_type] ( identifier[msg] )}
identifier[set_rollback] ()
keyword[return] identifier[Response] ( identifier[data] , identifier[status] = identifier[status] . identifier[HTTP_404_NOT_FOUND] )
keyword[elif] identifier[isinstance] ( identifier[exc] , identifier[PermissionDenied] ):
identifier[msg] = identifier[_] ( literal[string] )
identifier[data] ={ literal[string] : identifier[six] . identifier[text_type] ( identifier[msg] )}
identifier[set_rollback] ()
keyword[return] identifier[Response] ( identifier[data] , identifier[status] = identifier[status] . identifier[HTTP_403_FORBIDDEN] )
keyword[return] keyword[None] | def exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header # depends on [control=['if'], data=[]]
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait # depends on [control=['if'], data=[]]
if isinstance(exc.detail, (list, dict)):
data = exc.detail # depends on [control=['if'], data=[]]
else:
data = {'message': exc.detail}
set_rollback()
return Response(data, status=exc.status_code, headers=headers) # depends on [control=['if'], data=[]]
elif isinstance(exc, Http404):
msg = _('Not found.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_404_NOT_FOUND) # depends on [control=['if'], data=[]]
elif isinstance(exc, PermissionDenied):
msg = _('Permission denied.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_403_FORBIDDEN) # depends on [control=['if'], data=[]]
# Note: Unhandled exceptions will raise a 500 error.
return None |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_local_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
local_interface_name = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name.text = kwargs.pop('local_interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_lldp_neighbor_detail_output_lldp_neighbor_detail_local_interface_name, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_lldp_neighbor_detail] assign[=] call[name[ET].Element, parameter[constant[get_lldp_neighbor_detail]]]
variable[config] assign[=] name[get_lldp_neighbor_detail]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_lldp_neighbor_detail], constant[output]]]
variable[lldp_neighbor_detail] assign[=] call[name[ET].SubElement, parameter[name[output], constant[lldp-neighbor-detail]]]
variable[remote_interface_name_key] assign[=] call[name[ET].SubElement, parameter[name[lldp_neighbor_detail], constant[remote-interface-name]]]
name[remote_interface_name_key].text assign[=] call[name[kwargs].pop, parameter[constant[remote_interface_name]]]
variable[local_interface_name] assign[=] call[name[ET].SubElement, parameter[name[lldp_neighbor_detail], constant[local-interface-name]]]
name[local_interface_name].text assign[=] call[name[kwargs].pop, parameter[constant[local_interface_name]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_lldp_neighbor_detail_output_lldp_neighbor_detail_local_interface_name] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_lldp_neighbor_detail] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_lldp_neighbor_detail]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_lldp_neighbor_detail] , literal[string] )
identifier[lldp_neighbor_detail] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[remote_interface_name_key] = identifier[ET] . identifier[SubElement] ( identifier[lldp_neighbor_detail] , literal[string] )
identifier[remote_interface_name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[local_interface_name] = identifier[ET] . identifier[SubElement] ( identifier[lldp_neighbor_detail] , literal[string] )
identifier[local_interface_name] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_lldp_neighbor_detail_output_lldp_neighbor_detail_local_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_lldp_neighbor_detail = ET.Element('get_lldp_neighbor_detail')
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, 'output')
lldp_neighbor_detail = ET.SubElement(output, 'lldp-neighbor-detail')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, 'remote-interface-name')
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
local_interface_name = ET.SubElement(lldp_neighbor_detail, 'local-interface-name')
local_interface_name.text = kwargs.pop('local_interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def register_area(self, area_code, index, userdata):
    """Shares a memory area with the server. That memory block will be
    visible by the clients.

    Args:
        area_code: server area identifier.
        index: index of the area within its kind.
        userdata: ctypes structure/buffer backing the shared area.

    Returns:
        The result code of the underlying Srv_RegisterArea call.
    """
    # The original computed ctypes.sizeof(userdata) twice; once is enough.
    size = ctypes.sizeof(userdata)
    # Lazy %-style args: the message is only rendered if INFO is enabled.
    logger.info("registering area %s, index %s, size %s",
                area_code, index, size)
    return self.library.Srv_RegisterArea(self.pointer, area_code, index,
                                         ctypes.byref(userdata), size)
constant[Shares a memory area with the server. That memory block will be
visible by the clients.
]
variable[size] assign[=] call[name[ctypes].sizeof, parameter[name[userdata]]]
call[name[logger].info, parameter[binary_operation[constant[registering area %s, index %s, size %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204345ab0>, <ast.Name object at 0x7da204345960>, <ast.Name object at 0x7da204347bb0>]]]]]
variable[size] assign[=] call[name[ctypes].sizeof, parameter[name[userdata]]]
return[call[name[self].library.Srv_RegisterArea, parameter[name[self].pointer, name[area_code], name[index], call[name[ctypes].byref, parameter[name[userdata]]], name[size]]]] | keyword[def] identifier[register_area] ( identifier[self] , identifier[area_code] , identifier[index] , identifier[userdata] ):
literal[string]
identifier[size] = identifier[ctypes] . identifier[sizeof] ( identifier[userdata] )
identifier[logger] . identifier[info] ( literal[string] %( identifier[area_code] ,
identifier[index] , identifier[size] ))
identifier[size] = identifier[ctypes] . identifier[sizeof] ( identifier[userdata] )
keyword[return] identifier[self] . identifier[library] . identifier[Srv_RegisterArea] ( identifier[self] . identifier[pointer] , identifier[area_code] , identifier[index] ,
identifier[ctypes] . identifier[byref] ( identifier[userdata] ), identifier[size] ) | def register_area(self, area_code, index, userdata):
"""Shares a memory area with the server. That memory block will be
visible by the clients.
"""
size = ctypes.sizeof(userdata)
logger.info('registering area %s, index %s, size %s' % (area_code, index, size))
size = ctypes.sizeof(userdata)
return self.library.Srv_RegisterArea(self.pointer, area_code, index, ctypes.byref(userdata), size) |
def has_instance(name, provider=None):
    '''
    Return true if the instance is found on a provider

    CLI Example:

    .. code-block:: bash

        salt minionname cloud.has_instance myinstance
    '''
    # get_instance returns None when the instance is absent, so the
    # if/return-True/return-False dance collapses to one expression.
    return get_instance(name, provider) is not None
constant[
Return true if the instance is found on a provider
CLI Example:
.. code-block:: bash
salt minionname cloud.has_instance myinstance
]
variable[data] assign[=] call[name[get_instance], parameter[name[name], name[provider]]]
if compare[name[data] is constant[None]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[has_instance] ( identifier[name] , identifier[provider] = keyword[None] ):
literal[string]
identifier[data] = identifier[get_instance] ( identifier[name] , identifier[provider] )
keyword[if] identifier[data] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def has_instance(name, provider=None):
"""
Return true if the instance is found on a provider
CLI Example:
.. code-block:: bash
salt minionname cloud.has_instance myinstance
"""
data = get_instance(name, provider)
if data is None:
return False # depends on [control=['if'], data=[]]
return True |
def dampenings(self, trigger_id, trigger_mode=None):
    """
    Get all Dampenings for a Trigger (1 Dampening per mode).

    :param trigger_id: Trigger definition id to be retrieved.
    :param trigger_mode: Optional TriggerMode which is only fetched
    :type trigger_mode: TriggerMode
    :return: List of Dampening objects
    """
    # Build the URL path incrementally instead of duplicating it per branch.
    path = ['triggers', trigger_id, 'dampenings']
    if trigger_mode is not None:
        path += ['mode', trigger_mode]
    url = self._service_url(path)
    return Dampening.list_to_object_list(self._get(url))
constant[
Get all Dampenings for a Trigger (1 Dampening per mode).
:param trigger_id: Trigger definition id to be retrieved.
:param trigger_mode: Optional TriggerMode which is only fetched
:type trigger_mode: TriggerMode
:return: List of Dampening objects
]
if compare[name[trigger_mode] is_not constant[None]] begin[:]
variable[url] assign[=] call[name[self]._service_url, parameter[list[[<ast.Constant object at 0x7da2054a7550>, <ast.Name object at 0x7da2054a6c80>, <ast.Constant object at 0x7da2054a4fa0>, <ast.Constant object at 0x7da2054a6bf0>, <ast.Name object at 0x7da2054a4cd0>]]]]
variable[data] assign[=] call[name[self]._get, parameter[name[url]]]
return[call[name[Dampening].list_to_object_list, parameter[name[data]]]] | keyword[def] identifier[dampenings] ( identifier[self] , identifier[trigger_id] , identifier[trigger_mode] = keyword[None] ):
literal[string]
keyword[if] identifier[trigger_mode] keyword[is] keyword[not] keyword[None] :
identifier[url] = identifier[self] . identifier[_service_url] ([ literal[string] , identifier[trigger_id] , literal[string] , literal[string] , identifier[trigger_mode] ])
keyword[else] :
identifier[url] = identifier[self] . identifier[_service_url] ([ literal[string] , identifier[trigger_id] , literal[string] ])
identifier[data] = identifier[self] . identifier[_get] ( identifier[url] )
keyword[return] identifier[Dampening] . identifier[list_to_object_list] ( identifier[data] ) | def dampenings(self, trigger_id, trigger_mode=None):
"""
Get all Dampenings for a Trigger (1 Dampening per mode).
:param trigger_id: Trigger definition id to be retrieved.
:param trigger_mode: Optional TriggerMode which is only fetched
:type trigger_mode: TriggerMode
:return: List of Dampening objects
"""
if trigger_mode is not None:
url = self._service_url(['triggers', trigger_id, 'dampenings', 'mode', trigger_mode]) # depends on [control=['if'], data=['trigger_mode']]
else:
url = self._service_url(['triggers', trigger_id, 'dampenings'])
data = self._get(url)
return Dampening.list_to_object_list(data) |
def write_peps(self, peps, reverse_seqs):
    """Store peptide sequences in the database.

    When ``reverse_seqs`` is true the sequences are written reversed so
    that peptides with amino acids missing at the N-terminal can still
    be looked up through the index.
    """
    rows = [(row[0][::-1],) for row in peps] if reverse_seqs else peps
    self.get_cursor().executemany(
        'INSERT INTO known_searchspace(seqs) VALUES (?)', rows)
    self.conn.commit()
constant[Writes peps to db. We can reverse to be able to look up
peptides that have some amino acids missing at the N-terminal.
This way we can still use the index.
]
if name[reverse_seqs] begin[:]
variable[peps] assign[=] <ast.ListComp object at 0x7da1b2462f80>
variable[cursor] assign[=] call[name[self].get_cursor, parameter[]]
call[name[cursor].executemany, parameter[constant[INSERT INTO known_searchspace(seqs) VALUES (?)], name[peps]]]
call[name[self].conn.commit, parameter[]] | keyword[def] identifier[write_peps] ( identifier[self] , identifier[peps] , identifier[reverse_seqs] ):
literal[string]
keyword[if] identifier[reverse_seqs] :
identifier[peps] =[( identifier[x] [ literal[int] ][::- literal[int] ],) keyword[for] identifier[x] keyword[in] identifier[peps] ]
identifier[cursor] = identifier[self] . identifier[get_cursor] ()
identifier[cursor] . identifier[executemany] (
literal[string] , identifier[peps] )
identifier[self] . identifier[conn] . identifier[commit] () | def write_peps(self, peps, reverse_seqs):
"""Writes peps to db. We can reverse to be able to look up
peptides that have some amino acids missing at the N-terminal.
This way we can still use the index.
"""
if reverse_seqs:
peps = [(x[0][::-1],) for x in peps] # depends on [control=['if'], data=[]]
cursor = self.get_cursor()
cursor.executemany('INSERT INTO known_searchspace(seqs) VALUES (?)', peps)
self.conn.commit() |
def read(filename):
    """Reads an unstructured mesh with added data.

    :param filename: The file to read from.
    :type filename: str
    :returns mesh{2,3}d: The mesh data.
    :returns point_data: Point data read from file.
    :type point_data: dict
    :returns field_data: Field data read from file.
    :type field_data: dict
    """
    mesh = meshio.read(filename)
    # Prefer volumetric (tetra) cells; fall back to surface triangles.
    # The duplicated if/elif bodies of the original are unified here.
    for cell_type, mesh_class in (("tetra", MeshTetra), ("triangle", MeshTri)):
        if cell_type in mesh.cells:
            # make sure to include the used nodes only
            points, cells = _sanitize(mesh.points, mesh.cells[cell_type])
            return (
                mesh_class(points, cells),
                mesh.point_data,
                mesh.cell_data,
                mesh.field_data,
            )
    raise RuntimeError("Unknown mesh type.")
constant[Reads an unstructured mesh with added data.
:param filenames: The files to read from.
:type filenames: str
:returns mesh{2,3}d: The mesh data.
:returns point_data: Point data read from file.
:type point_data: dict
:returns field_data: Field data read from file.
:type field_data: dict
]
variable[mesh] assign[=] call[name[meshio].read, parameter[name[filename]]]
if compare[constant[tetra] in name[mesh].cells] begin[:]
<ast.Tuple object at 0x7da18ede42b0> assign[=] call[name[_sanitize], parameter[name[mesh].points, call[name[mesh].cells][constant[tetra]]]]
return[tuple[[<ast.Call object at 0x7da18ede7880>, <ast.Attribute object at 0x7da18ede6fe0>, <ast.Attribute object at 0x7da18ede7460>, <ast.Attribute object at 0x7da18ede76d0>]]] | keyword[def] identifier[read] ( identifier[filename] ):
literal[string]
identifier[mesh] = identifier[meshio] . identifier[read] ( identifier[filename] )
keyword[if] literal[string] keyword[in] identifier[mesh] . identifier[cells] :
identifier[points] , identifier[cells] = identifier[_sanitize] ( identifier[mesh] . identifier[points] , identifier[mesh] . identifier[cells] [ literal[string] ])
keyword[return] (
identifier[MeshTetra] ( identifier[points] , identifier[cells] ),
identifier[mesh] . identifier[point_data] ,
identifier[mesh] . identifier[cell_data] ,
identifier[mesh] . identifier[field_data] ,
)
keyword[elif] literal[string] keyword[in] identifier[mesh] . identifier[cells] :
identifier[points] , identifier[cells] = identifier[_sanitize] ( identifier[mesh] . identifier[points] , identifier[mesh] . identifier[cells] [ literal[string] ])
keyword[return] (
identifier[MeshTri] ( identifier[points] , identifier[cells] ),
identifier[mesh] . identifier[point_data] ,
identifier[mesh] . identifier[cell_data] ,
identifier[mesh] . identifier[field_data] ,
)
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def read(filename):
"""Reads an unstructured mesh with added data.
:param filenames: The files to read from.
:type filenames: str
:returns mesh{2,3}d: The mesh data.
:returns point_data: Point data read from file.
:type point_data: dict
:returns field_data: Field data read from file.
:type field_data: dict
"""
mesh = meshio.read(filename)
# make sure to include the used nodes only
if 'tetra' in mesh.cells:
(points, cells) = _sanitize(mesh.points, mesh.cells['tetra'])
return (MeshTetra(points, cells), mesh.point_data, mesh.cell_data, mesh.field_data) # depends on [control=['if'], data=[]]
elif 'triangle' in mesh.cells:
(points, cells) = _sanitize(mesh.points, mesh.cells['triangle'])
return (MeshTri(points, cells), mesh.point_data, mesh.cell_data, mesh.field_data) # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Unknown mesh type.') |
def _load_yaml_config(path=None):
"""Open and return the yaml contents."""
furious_yaml_path = path or find_furious_yaml()
if furious_yaml_path is None:
logging.debug("furious.yaml not found.")
return None
with open(furious_yaml_path) as yaml_file:
return yaml_file.read() | def function[_load_yaml_config, parameter[path]]:
constant[Open and return the yaml contents.]
variable[furious_yaml_path] assign[=] <ast.BoolOp object at 0x7da207f99ff0>
if compare[name[furious_yaml_path] is constant[None]] begin[:]
call[name[logging].debug, parameter[constant[furious.yaml not found.]]]
return[constant[None]]
with call[name[open], parameter[name[furious_yaml_path]]] begin[:]
return[call[name[yaml_file].read, parameter[]]] | keyword[def] identifier[_load_yaml_config] ( identifier[path] = keyword[None] ):
literal[string]
identifier[furious_yaml_path] = identifier[path] keyword[or] identifier[find_furious_yaml] ()
keyword[if] identifier[furious_yaml_path] keyword[is] keyword[None] :
identifier[logging] . identifier[debug] ( literal[string] )
keyword[return] keyword[None]
keyword[with] identifier[open] ( identifier[furious_yaml_path] ) keyword[as] identifier[yaml_file] :
keyword[return] identifier[yaml_file] . identifier[read] () | def _load_yaml_config(path=None):
"""Open and return the yaml contents."""
furious_yaml_path = path or find_furious_yaml()
if furious_yaml_path is None:
logging.debug('furious.yaml not found.')
return None # depends on [control=['if'], data=[]]
with open(furious_yaml_path) as yaml_file:
return yaml_file.read() # depends on [control=['with'], data=['yaml_file']] |
def compute_verdict(self, results):
    """
    Match results to the configured reject, quarantine and accept classes,
    and return a verdict based on that.

    The verdict classes are matched in the order: reject_classes,
    quarantine_classes, accept_classes. This means that you can configure
    different verdicts for different confidence results, for instance:

        reject_classes = Spam:0.99     # Reject obvious spam
        quarantine_classes = Spam:0.7  # Quarantine spam with confidence
                                       # between 0.7 and 0.99
        accept_classes = Spam          # Accept low confidence spam (good
                                       # for FP and retraining)

    Args:
        results -- A results dictionary from DspamClient.
    """
    # Each rule set maps a DSPAM class name to a confidence threshold.
    # Rules are evaluated in priority order; the first set whose
    # threshold is met determines the verdict. This replaces three
    # near-identical copies of the same check in the original.
    rules = (
        ('reject', self.reject_classes, self.VERDICT_REJECT),
        ('quarantine', self.quarantine_classes, self.VERDICT_QUARANTINE),
        ('accept', self.accept_classes, self.VERDICT_ACCEPT),
    )
    for action, classes, verdict in rules:
        if results['class'] not in classes:
            continue
        if float(results['confidence']) >= classes[results['class']]:
            logger.debug(
                '<{0}> Suggesting to {1} the message based on DSPAM '
                'results: user={2[user]}, class={2[class]}, '
                'confidence={2[confidence]}'.format(self.id, action, results))
            return verdict
    logger.debug(
        '<{0}> Suggesting to accept the message, no verdict class matched '
        'DSPAM results: user={1[user]}, class={1[class]}, '
        'confidence={1[confidence]}'.format(self.id, results))
    return self.VERDICT_ACCEPT
constant[
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
]
if compare[call[name[results]][constant[class]] in name[self].reject_classes] begin[:]
variable[threshold] assign[=] call[name[self].reject_classes][call[name[results]][constant[class]]]
if compare[call[name[float], parameter[call[name[results]][constant[confidence]]]] greater_or_equal[>=] name[threshold]] begin[:]
call[name[logger].debug, parameter[call[constant[<{0}> Suggesting to reject the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}].format, parameter[name[self].id, name[results]]]]]
return[name[self].VERDICT_REJECT]
if compare[call[name[results]][constant[class]] in name[self].quarantine_classes] begin[:]
variable[threshold] assign[=] call[name[self].quarantine_classes][call[name[results]][constant[class]]]
if compare[call[name[float], parameter[call[name[results]][constant[confidence]]]] greater_or_equal[>=] name[threshold]] begin[:]
call[name[logger].debug, parameter[call[constant[<{0}> Suggesting to quarantine the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}].format, parameter[name[self].id, name[results]]]]]
return[name[self].VERDICT_QUARANTINE]
if compare[call[name[results]][constant[class]] in name[self].accept_classes] begin[:]
variable[threshold] assign[=] call[name[self].accept_classes][call[name[results]][constant[class]]]
if compare[call[name[float], parameter[call[name[results]][constant[confidence]]]] greater_or_equal[>=] name[threshold]] begin[:]
call[name[logger].debug, parameter[call[constant[<{0}> Suggesting to accept the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}].format, parameter[name[self].id, name[results]]]]]
return[name[self].VERDICT_ACCEPT]
call[name[logger].debug, parameter[call[constant[<{0}> Suggesting to accept the message, no verdict class matched DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}].format, parameter[name[self].id, name[results]]]]]
return[name[self].VERDICT_ACCEPT] | keyword[def] identifier[compute_verdict] ( identifier[self] , identifier[results] ):
literal[string]
keyword[if] identifier[results] [ literal[string] ] keyword[in] identifier[self] . identifier[reject_classes] :
identifier[threshold] = identifier[self] . identifier[reject_classes] [ identifier[results] [ literal[string] ]]
keyword[if] identifier[float] ( identifier[results] [ literal[string] ])>= identifier[threshold] :
identifier[logger] . identifier[debug] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[id] , identifier[results] ))
keyword[return] identifier[self] . identifier[VERDICT_REJECT]
keyword[if] identifier[results] [ literal[string] ] keyword[in] identifier[self] . identifier[quarantine_classes] :
identifier[threshold] = identifier[self] . identifier[quarantine_classes] [ identifier[results] [ literal[string] ]]
keyword[if] identifier[float] ( identifier[results] [ literal[string] ])>= identifier[threshold] :
identifier[logger] . identifier[debug] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[id] , identifier[results] ))
keyword[return] identifier[self] . identifier[VERDICT_QUARANTINE]
keyword[if] identifier[results] [ literal[string] ] keyword[in] identifier[self] . identifier[accept_classes] :
identifier[threshold] = identifier[self] . identifier[accept_classes] [ identifier[results] [ literal[string] ]]
keyword[if] identifier[float] ( identifier[results] [ literal[string] ])>= identifier[threshold] :
identifier[logger] . identifier[debug] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[id] , identifier[results] ))
keyword[return] identifier[self] . identifier[VERDICT_ACCEPT]
identifier[logger] . identifier[debug] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[id] , identifier[results] ))
keyword[return] identifier[self] . identifier[VERDICT_ACCEPT] | def compute_verdict(self, results):
"""
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
"""
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug('<{0}> Suggesting to reject the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug('<{0}> Suggesting to quarantine the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug('<{0}> Suggesting to accept the message based on DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
logger.debug('<{0}> Suggesting to accept the message, no verdict class matched DSPAM results: user={1[user]}, class={1[class]}, confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT |
def _GetMessageFileKeys(self, event_log_provider_key):
"""Retrieves the message file keys.
Args:
event_log_provider_key (int): Event Log provider key.
Yields:
int: message file key.
"""
table_names = ['message_file_per_event_log_provider']
column_names = ['message_file_key']
condition = 'event_log_provider_key == {0:d}'.format(
event_log_provider_key)
generator = self._database_file.GetValues(
table_names, column_names, condition)
for values in generator:
yield values['message_file_key'] | def function[_GetMessageFileKeys, parameter[self, event_log_provider_key]]:
constant[Retrieves the message file keys.
Args:
event_log_provider_key (int): Event Log provider key.
Yields:
int: message file key.
]
variable[table_names] assign[=] list[[<ast.Constant object at 0x7da18c4cc670>]]
variable[column_names] assign[=] list[[<ast.Constant object at 0x7da20c6a9fc0>]]
variable[condition] assign[=] call[constant[event_log_provider_key == {0:d}].format, parameter[name[event_log_provider_key]]]
variable[generator] assign[=] call[name[self]._database_file.GetValues, parameter[name[table_names], name[column_names], name[condition]]]
for taget[name[values]] in starred[name[generator]] begin[:]
<ast.Yield object at 0x7da20c6ab4c0> | keyword[def] identifier[_GetMessageFileKeys] ( identifier[self] , identifier[event_log_provider_key] ):
literal[string]
identifier[table_names] =[ literal[string] ]
identifier[column_names] =[ literal[string] ]
identifier[condition] = literal[string] . identifier[format] (
identifier[event_log_provider_key] )
identifier[generator] = identifier[self] . identifier[_database_file] . identifier[GetValues] (
identifier[table_names] , identifier[column_names] , identifier[condition] )
keyword[for] identifier[values] keyword[in] identifier[generator] :
keyword[yield] identifier[values] [ literal[string] ] | def _GetMessageFileKeys(self, event_log_provider_key):
"""Retrieves the message file keys.
Args:
event_log_provider_key (int): Event Log provider key.
Yields:
int: message file key.
"""
table_names = ['message_file_per_event_log_provider']
column_names = ['message_file_key']
condition = 'event_log_provider_key == {0:d}'.format(event_log_provider_key)
generator = self._database_file.GetValues(table_names, column_names, condition)
for values in generator:
yield values['message_file_key'] # depends on [control=['for'], data=['values']] |
def receive_promise(self, msg):
    '''
    Returns an Accept message once a quorum of Promise messages is achieved
    '''
    self.observe_proposal(msg.proposal_id)

    # Ignore the promise unless it answers our current proposal, we have
    # not already become leader, and this peer has not promised before.
    if self.leader:
        return
    if msg.proposal_id != self.proposal_id:
        return
    if msg.from_uid in self.promises_received:
        return

    self.promises_received.add(msg.from_uid)

    # Paxos: adopt the value attached to the highest previously-accepted
    # proposal reported by any promiser, instead of our own value.
    if self.highest_accepted_id is None or msg.last_accepted_id > self.highest_accepted_id:
        self.highest_accepted_id = msg.last_accepted_id
        if msg.last_accepted_value is not None:
            self.proposed_value = msg.last_accepted_value

    # Quorum reached: assume leadership and broadcast an Accept if we
    # actually have a value to propose.
    if len(self.promises_received) == self.quorum_size:
        self.leader = True
        if self.proposed_value is not None:
            self.current_accept_msg = Accept(self.network_uid, self.proposal_id, self.proposed_value)
            return self.current_accept_msg
constant[
Returns an Accept messages if a quorum of Promise messages is achieved
]
call[name[self].observe_proposal, parameter[name[msg].proposal_id]]
if <ast.BoolOp object at 0x7da1b16c1f00> begin[:]
call[name[self].promises_received.add, parameter[name[msg].from_uid]]
if <ast.BoolOp object at 0x7da1b16c16f0> begin[:]
name[self].highest_accepted_id assign[=] name[msg].last_accepted_id
if compare[name[msg].last_accepted_value is_not constant[None]] begin[:]
name[self].proposed_value assign[=] name[msg].last_accepted_value
if compare[call[name[len], parameter[name[self].promises_received]] equal[==] name[self].quorum_size] begin[:]
name[self].leader assign[=] constant[True]
if compare[name[self].proposed_value is_not constant[None]] begin[:]
name[self].current_accept_msg assign[=] call[name[Accept], parameter[name[self].network_uid, name[self].proposal_id, name[self].proposed_value]]
return[name[self].current_accept_msg] | keyword[def] identifier[receive_promise] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[self] . identifier[observe_proposal] ( identifier[msg] . identifier[proposal_id] )
keyword[if] keyword[not] identifier[self] . identifier[leader] keyword[and] identifier[msg] . identifier[proposal_id] == identifier[self] . identifier[proposal_id] keyword[and] identifier[msg] . identifier[from_uid] keyword[not] keyword[in] identifier[self] . identifier[promises_received] :
identifier[self] . identifier[promises_received] . identifier[add] ( identifier[msg] . identifier[from_uid] )
keyword[if] identifier[self] . identifier[highest_accepted_id] keyword[is] keyword[None] keyword[or] identifier[msg] . identifier[last_accepted_id] > identifier[self] . identifier[highest_accepted_id] :
identifier[self] . identifier[highest_accepted_id] = identifier[msg] . identifier[last_accepted_id]
keyword[if] identifier[msg] . identifier[last_accepted_value] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[proposed_value] = identifier[msg] . identifier[last_accepted_value]
keyword[if] identifier[len] ( identifier[self] . identifier[promises_received] )== identifier[self] . identifier[quorum_size] :
identifier[self] . identifier[leader] = keyword[True]
keyword[if] identifier[self] . identifier[proposed_value] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[current_accept_msg] = identifier[Accept] ( identifier[self] . identifier[network_uid] , identifier[self] . identifier[proposal_id] , identifier[self] . identifier[proposed_value] )
keyword[return] identifier[self] . identifier[current_accept_msg] | def receive_promise(self, msg):
"""
Returns an Accept messages if a quorum of Promise messages is achieved
"""
self.observe_proposal(msg.proposal_id)
if not self.leader and msg.proposal_id == self.proposal_id and (msg.from_uid not in self.promises_received):
self.promises_received.add(msg.from_uid)
if self.highest_accepted_id is None or msg.last_accepted_id > self.highest_accepted_id:
self.highest_accepted_id = msg.last_accepted_id
if msg.last_accepted_value is not None:
self.proposed_value = msg.last_accepted_value # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if len(self.promises_received) == self.quorum_size:
self.leader = True
if self.proposed_value is not None:
self.current_accept_msg = Accept(self.network_uid, self.proposal_id, self.proposed_value)
return self.current_accept_msg # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def h2i(self, pkt, h):
    """Convert a human readable "x.x.x.x/y" string to the internal form.

    Args:
        pkt: packet being dissected (unused here, kept for the field API).
        h (str): address in "x.x.x.x/y" CIDR notation.

    Returns:
        tuple: ``(int(y), "x.x.x.x")`` — prefix length first, then address.

    Raises:
        ValueError: if *h* does not contain exactly one '/' or the mask
            part is not an integer.
    """
    # str.split is the idiomatic choice for a literal separator; re.split
    # added a regex dependency for no benefit.
    ip, mask = h.split('/')
    return int(mask), ip
constant[human x.x.x.x/y to internal]
<ast.Tuple object at 0x7da1b138e710> assign[=] call[name[re].split, parameter[constant[/], name[h]]]
return[tuple[[<ast.Call object at 0x7da1b138d630>, <ast.Name object at 0x7da1b138eb00>]]] | keyword[def] identifier[h2i] ( identifier[self] , identifier[pkt] , identifier[h] ):
literal[string]
identifier[ip] , identifier[mask] = identifier[re] . identifier[split] ( literal[string] , identifier[h] )
keyword[return] identifier[int] ( identifier[mask] ), identifier[ip] | def h2i(self, pkt, h):
"""human x.x.x.x/y to internal"""
(ip, mask) = re.split('/', h)
return (int(mask), ip) |
def connect_post_namespaced_pod_attach(self, name, namespace, **kwargs):  # noqa: E501
    """connect POST requests to attach of Pod.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_post_namespaced_pod_attach(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodAttachOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.
    :param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
    :param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
    :param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
    :param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this wrapper always want just the payload, never the
    # full (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths invoke the same implementation;
    # ``async_req`` merely makes it return a request thread instead of
    # the response data, so a single return covers both cases.
    return self.connect_post_namespaced_pod_attach_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
constant[connect_post_namespaced_pod_attach # noqa: E501
connect POST requests to attach of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_pod_attach(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodAttachOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.
:param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
:param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
:param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
:param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.
:return: str
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].connect_post_namespaced_pod_attach_with_http_info, parameter[name[name], name[namespace]]]] | keyword[def] identifier[connect_post_namespaced_pod_attach] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[connect_post_namespaced_pod_attach_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[connect_post_namespaced_pod_attach_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def connect_post_namespaced_pod_attach(self, name, namespace, **kwargs): # noqa: E501
'connect_post_namespaced_pod_attach # noqa: E501\n\n connect POST requests to attach of Pod # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.connect_post_namespaced_pod_attach(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the PodAttachOptions (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.\n :param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.\n :param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.\n :param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.\n :param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.connect_post_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
def _get(self, resource, payload=None):
    """GET ``resource`` from the Consul master and return the response.

    Builds the full versioned URL so callers only need to supply the
    resource path.

    :param resource: API resource path relative to the versioned root.
    :param payload: optional dict of query-string parameters.
    """
    # Avoid a dangerous mutable default argument; fall back to an
    # empty query string.
    query = payload or {}
    url = '{}/{}/{}'.format(
        self.master, pyconsul.__consul_api_version__, resource)
    return requests.get(url, params=query)
constant[ Wrapper around requests.get that shorten caller url and takes care
of errors ]
variable[payload] assign[=] <ast.BoolOp object at 0x7da1b0ba97e0>
return[call[name[requests].get, parameter[call[constant[{}/{}/{}].format, parameter[name[self].master, name[pyconsul].__consul_api_version__, name[resource]]]]]] | keyword[def] identifier[_get] ( identifier[self] , identifier[resource] , identifier[payload] = keyword[None] ):
literal[string]
identifier[payload] = identifier[payload] keyword[or] {}
keyword[return] identifier[requests] . identifier[get] (
literal[string] . identifier[format] (
identifier[self] . identifier[master] , identifier[pyconsul] . identifier[__consul_api_version__] , identifier[resource] ),
identifier[params] = identifier[payload]
) | def _get(self, resource, payload=None):
""" Wrapper around requests.get that shorten caller url and takes care
of errors """
# Avoid dangerous default function argument `{}`
payload = payload or {}
# Build the request and return json response
return requests.get('{}/{}/{}'.format(self.master, pyconsul.__consul_api_version__, resource), params=payload) |
def remove_tier(self, id_tier, clean=True):
    """Remove a tier.

    :param str id_tier: Name of the tier.
    :param bool clean: Flag to also clean the timeslots.
    :raises KeyError: If tier is non existent.
    """
    # ``pop`` without a default raises KeyError for unknown tiers,
    # matching the documented contract of ``del``.
    self.tiers.pop(id_tier)
    if clean:
        self.clean_time_slots()
constant[Remove a tier.
:param str id_tier: Name of the tier.
:param bool clean: Flag to also clean the timeslots.
:raises KeyError: If tier is non existent.
]
<ast.Delete object at 0x7da1b026f1f0>
if name[clean] begin[:]
call[name[self].clean_time_slots, parameter[]] | keyword[def] identifier[remove_tier] ( identifier[self] , identifier[id_tier] , identifier[clean] = keyword[True] ):
literal[string]
keyword[del] ( identifier[self] . identifier[tiers] [ identifier[id_tier] ])
keyword[if] identifier[clean] :
identifier[self] . identifier[clean_time_slots] () | def remove_tier(self, id_tier, clean=True):
"""Remove a tier.
:param str id_tier: Name of the tier.
:param bool clean: Flag to also clean the timeslots.
:raises KeyError: If tier is non existent.
"""
del self.tiers[id_tier]
if clean:
self.clean_time_slots() # depends on [control=['if'], data=[]] |
def _parse_contract(self, player_info):
"""
Parse the player's contract.
Depending on the player's contract status, a contract table is located
at the bottom of the stats page and includes player wages by season. If
found, create a dictionary housing the wages by season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
contract = {}
salary_table = player_info('table#br-salaries')
for row in salary_table('tbody tr').items():
if 'class="spacer partial_table"' in str(row):
continue
year = row('th[data-stat="year_ID"]').text()
if year.strip() == '':
continue
age = row('td[data-stat="age"]').text()
team = self._parse_team_name(str(row('td[data-stat="team_name"]')))
salary = row('td[data-stat="Salary"]').text()
contract[year] = {
'age': age,
'team': team,
'salary': salary
}
setattr(self, '_contract', contract) | def function[_parse_contract, parameter[self, player_info]]:
constant[
Parse the player's contract.
Depending on the player's contract status, a contract table is located
at the bottom of the stats page and includes player wages by season. If
found, create a dictionary housing the wages by season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
]
variable[contract] assign[=] dictionary[[], []]
variable[salary_table] assign[=] call[name[player_info], parameter[constant[table#br-salaries]]]
for taget[name[row]] in starred[call[call[name[salary_table], parameter[constant[tbody tr]]].items, parameter[]]] begin[:]
if compare[constant[class="spacer partial_table"] in call[name[str], parameter[name[row]]]] begin[:]
continue
variable[year] assign[=] call[call[name[row], parameter[constant[th[data-stat="year_ID"]]]].text, parameter[]]
if compare[call[name[year].strip, parameter[]] equal[==] constant[]] begin[:]
continue
variable[age] assign[=] call[call[name[row], parameter[constant[td[data-stat="age"]]]].text, parameter[]]
variable[team] assign[=] call[name[self]._parse_team_name, parameter[call[name[str], parameter[call[name[row], parameter[constant[td[data-stat="team_name"]]]]]]]]
variable[salary] assign[=] call[call[name[row], parameter[constant[td[data-stat="Salary"]]]].text, parameter[]]
call[name[contract]][name[year]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b452d0>, <ast.Constant object at 0x7da1b0b45300>, <ast.Constant object at 0x7da1b0b45330>], [<ast.Name object at 0x7da1b0b45360>, <ast.Name object at 0x7da1b0b45390>, <ast.Name object at 0x7da1b0b453c0>]]
call[name[setattr], parameter[name[self], constant[_contract], name[contract]]] | keyword[def] identifier[_parse_contract] ( identifier[self] , identifier[player_info] ):
literal[string]
identifier[contract] ={}
identifier[salary_table] = identifier[player_info] ( literal[string] )
keyword[for] identifier[row] keyword[in] identifier[salary_table] ( literal[string] ). identifier[items] ():
keyword[if] literal[string] keyword[in] identifier[str] ( identifier[row] ):
keyword[continue]
identifier[year] = identifier[row] ( literal[string] ). identifier[text] ()
keyword[if] identifier[year] . identifier[strip] ()== literal[string] :
keyword[continue]
identifier[age] = identifier[row] ( literal[string] ). identifier[text] ()
identifier[team] = identifier[self] . identifier[_parse_team_name] ( identifier[str] ( identifier[row] ( literal[string] )))
identifier[salary] = identifier[row] ( literal[string] ). identifier[text] ()
identifier[contract] [ identifier[year] ]={
literal[string] : identifier[age] ,
literal[string] : identifier[team] ,
literal[string] : identifier[salary]
}
identifier[setattr] ( identifier[self] , literal[string] , identifier[contract] ) | def _parse_contract(self, player_info):
"""
Parse the player's contract.
Depending on the player's contract status, a contract table is located
at the bottom of the stats page and includes player wages by season. If
found, create a dictionary housing the wages by season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
contract = {}
salary_table = player_info('table#br-salaries')
for row in salary_table('tbody tr').items():
if 'class="spacer partial_table"' in str(row):
continue # depends on [control=['if'], data=[]]
year = row('th[data-stat="year_ID"]').text()
if year.strip() == '':
continue # depends on [control=['if'], data=[]]
age = row('td[data-stat="age"]').text()
team = self._parse_team_name(str(row('td[data-stat="team_name"]')))
salary = row('td[data-stat="Salary"]').text()
contract[year] = {'age': age, 'team': team, 'salary': salary} # depends on [control=['for'], data=['row']]
setattr(self, '_contract', contract) |
def to_json(self):
    """Serialise this HucitAuthor to a JSON formatted string.

    The resulting object contains the author's URI, CTS URN, names
    (one ``{"language", "label"}`` entry per name), name abbreviations,
    and the JSON serialisation of each of the author's works.

    Example:
        >> homer = kb.get_resource_by_urn("urn:cts:greekLit:tlg0012")
        >> homer.to_json()
    """
    name_entries = [
        {"language": language, "label": label}
        for language, label in self.get_names()
    ]
    # Each work serialises itself; round-trip through json.loads so the
    # works nest as objects rather than embedded strings.
    work_entries = [json.loads(work.to_json()) for work in self.get_works()]
    record = {
        "uri": self.subject,
        "urn": str(self.get_urn()),
        "names": name_entries,
        "name_abbreviations": self.get_abbreviations(),
        "works": work_entries,
    }
    return json.dumps(record, indent=2)
constant[
Serialises a HucitAuthor to a JSON formatted string.
Example:
>> homer = kb.get_resource_by_urn("urn:cts:greekLit:tlg0012")
>> homer.to_json()
{
"name_abbreviations": [
"Hom."
],
"urn": "urn:cts:greekLit:tlg0012",
"works": [
{
"urn": "urn:cts:greekLit:tlg0012.tlg001",
"titles": [
{
"language": "it",
"label": "Iliade"
},
{
"language": "la",
"label": "Ilias"
},
{
"language": "en",
"label": "Iliad"
},
{
"language": "de",
"label": "Ilias"
},
{
"language": "fr",
"label": "L'Iliade"
}
],
"uri": "http://purl.org/hucit/kb/works/2815",
"title_abbreviations": [
"Il."
]
},
{
"urn": "urn:cts:greekLit:tlg0012.tlg002",
"titles": [
{
"language": "en",
"label": "Odyssey"
},
{
"language": "fr",
"label": "L'Odyssée"
},
{
"language": "it",
"label": "Odissea"
},
{
"language": "la",
"label": "Odyssea"
},
{
"language": "de",
"label": "Odyssee"
}
],
"uri": "http://purl.org/hucit/kb/works/2816",
"title_abbreviations": [
"Od."
]
},
{
"urn": "urn:cts:cwkb:927.2814",
"titles": [
{
"language": "la",
"label": "Epigrammata"
}
],
"uri": "http://purl.org/hucit/kb/works/2814",
"title_abbreviations": [
"Epigr."
]
}
],
"uri": "http://purl.org/hucit/kb/authors/927",
"names": [
{
"language": "fr",
"label": "Homère"
},
{
"language": "la",
"label": "Homerus"
},
{
"language": null,
"label": "Homeros"
},
{
"language": "en",
"label": "Homer"
},
{
"language": "it",
"label": "Omero"
}
]
}
]
variable[names] assign[=] call[name[self].get_names, parameter[]]
return[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da20c6e6590>, <ast.Constant object at 0x7da20c6e49a0>, <ast.Constant object at 0x7da20c6e73a0>, <ast.Constant object at 0x7da20c6e5810>, <ast.Constant object at 0x7da20c6e77f0>], [<ast.Attribute object at 0x7da20c6e4bb0>, <ast.Call object at 0x7da20c6e5840>, <ast.ListComp object at 0x7da20c6e7c40>, <ast.Call object at 0x7da20c6e5ed0>, <ast.ListComp object at 0x7da20c6e7d90>]]]]] | keyword[def] identifier[to_json] ( identifier[self] ):
literal[string]
identifier[names] = identifier[self] . identifier[get_names] ()
keyword[return] identifier[json] . identifier[dumps] ({
literal[string] : identifier[self] . identifier[subject]
, literal[string] : identifier[str] ( identifier[self] . identifier[get_urn] ())
, literal[string] :[{ literal[string] : identifier[lang] , literal[string] : identifier[label] } keyword[for] identifier[lang] , identifier[label] keyword[in] identifier[names] ]
, literal[string] : identifier[self] . identifier[get_abbreviations] ()
, literal[string] :[ identifier[json] . identifier[loads] ( identifier[work] . identifier[to_json] ()) keyword[for] identifier[work] keyword[in] identifier[self] . identifier[get_works] ()]
}, identifier[indent] = literal[int] ) | def to_json(self):
"""
Serialises a HucitAuthor to a JSON formatted string.
Example:
>> homer = kb.get_resource_by_urn("urn:cts:greekLit:tlg0012")
>> homer.to_json()
{
"name_abbreviations": [
"Hom."
],
"urn": "urn:cts:greekLit:tlg0012",
"works": [
{
"urn": "urn:cts:greekLit:tlg0012.tlg001",
"titles": [
{
"language": "it",
"label": "Iliade"
},
{
"language": "la",
"label": "Ilias"
},
{
"language": "en",
"label": "Iliad"
},
{
"language": "de",
"label": "Ilias"
},
{
"language": "fr",
"label": "L'Iliade"
}
],
"uri": "http://purl.org/hucit/kb/works/2815",
"title_abbreviations": [
"Il."
]
},
{
"urn": "urn:cts:greekLit:tlg0012.tlg002",
"titles": [
{
"language": "en",
"label": "Odyssey"
},
{
"language": "fr",
"label": "L'Odyssée"
},
{
"language": "it",
"label": "Odissea"
},
{
"language": "la",
"label": "Odyssea"
},
{
"language": "de",
"label": "Odyssee"
}
],
"uri": "http://purl.org/hucit/kb/works/2816",
"title_abbreviations": [
"Od."
]
},
{
"urn": "urn:cts:cwkb:927.2814",
"titles": [
{
"language": "la",
"label": "Epigrammata"
}
],
"uri": "http://purl.org/hucit/kb/works/2814",
"title_abbreviations": [
"Epigr."
]
}
],
"uri": "http://purl.org/hucit/kb/authors/927",
"names": [
{
"language": "fr",
"label": "Homère"
},
{
"language": "la",
"label": "Homerus"
},
{
"language": null,
"label": "Homeros"
},
{
"language": "en",
"label": "Homer"
},
{
"language": "it",
"label": "Omero"
}
]
}
"""
names = self.get_names()
return json.dumps({'uri': self.subject, 'urn': str(self.get_urn()), 'names': [{'language': lang, 'label': label} for (lang, label) in names], 'name_abbreviations': self.get_abbreviations(), 'works': [json.loads(work.to_json()) for work in self.get_works()]}, indent=2) |
def pull(self):
    """Print a human-readable summary of every packet in ``self.input_stream``.

    For each item pulled from the stream this prints, in order: the
    timestamp, the Ethernet frame (src/dst MAC and ethertype), the network
    layer (IP/IP6 details when available, otherwise the raw packet dict),
    the transport layer with a short data summary, the application layer,
    any resolved domains, and any tags.  A blank line separates packets.

    Each ``item`` is expected to be a dict with at least the keys read
    below: ``'timestamp'``, ``'eth'`` (with ``'src'``/``'dst'``/``'type'``),
    ``'packet'`` (with ``'type'`` and, for IP/IP6, ``'src'``/``'dst'``/
    ``'len'``/``'ttl'`` and the IP-only frag fields), ``'transport'`` and
    ``'application'`` (either falsy or dicts with a ``'type'`` key).
    """
    # For each packet in the pcap process the contents
    for item in self.input_stream:
        # Print out the timestamp in UTC
        print('Timestamp: %s' % item['timestamp'])
        # Unpack the Ethernet frame (mac src/dst, ethertype)
        print('Ethernet Frame: %s --> %s (type: %d)' % \
              (net_utils.mac_to_str(item['eth']['src']), net_utils.mac_to_str(item['eth']['dst']), item['eth']['type']))
        # Print out the Packet info; end='' keeps the layer details on the
        # same line as the "Packet:" prefix.
        packet_type = item['packet']['type']
        print('Packet: %s ' % packet_type, end='')
        packet = item['packet']
        if packet_type in ['IP', 'IP6']:
            print('%s --> %s (len:%d ttl:%d)' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst']),
                                                 packet['len'], packet['ttl']), end='')
            # Fragmentation fields (df/mf/offset) only exist for IPv4.
            if packet_type == 'IP':
                print('-- Frag(df:%d mf:%d offset:%d)' % (packet['df'], packet['mf'], packet['offset']))
            else:
                print()
        else:
            # Unknown layer-3 type: fall back to the raw dict repr.
            print(str(packet))
        # Print out transport and application layers
        if item['transport']:
            transport_info = item['transport']
            print('Transport: %s ' % transport_info['type'], end='')
            # Dump every transport field except the payload itself.
            for key, value in compat.iteritems(transport_info):
                if key != 'data':
                    print(key+':'+repr(value), end=' ')
            # Give summary info about data: byte count plus a truncated repr.
            data = transport_info['data']
            print('\nData: %d bytes' % len(data), end='')
            if data:
                print('(%s...)' % repr(data)[:30])
            else:
                print()
        # Application data
        if item['application']:
            print('Application: %s' % item['application']['type'], end='')
            print(str(item['application']))
        # Is there domain info? (only present when DNS resolution ran upstream)
        if 'src_domain' in packet:
            print('Domains: %s --> %s' % (packet['src_domain'], packet['dst_domain']))
        # Tags
        if 'tags' in item:
            print(list(item['tags']))
        # Blank line between packets.
        print()
constant[Print out information about each packet from the input_stream]
for taget[name[item]] in starred[name[self].input_stream] begin[:]
call[name[print], parameter[binary_operation[constant[Timestamp: %s] <ast.Mod object at 0x7da2590d6920> call[name[item]][constant[timestamp]]]]]
call[name[print], parameter[binary_operation[constant[Ethernet Frame: %s --> %s (type: %d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b1a75ba0>, <ast.Call object at 0x7da1b1a75270>, <ast.Subscript object at 0x7da1b19d39a0>]]]]]
variable[packet_type] assign[=] call[call[name[item]][constant[packet]]][constant[type]]
call[name[print], parameter[binary_operation[constant[Packet: %s ] <ast.Mod object at 0x7da2590d6920> name[packet_type]]]]
variable[packet] assign[=] call[name[item]][constant[packet]]
if compare[name[packet_type] in list[[<ast.Constant object at 0x7da1b19d39d0>, <ast.Constant object at 0x7da1b19d3d90>]]] begin[:]
call[name[print], parameter[binary_operation[constant[%s --> %s (len:%d ttl:%d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b19d0400>, <ast.Call object at 0x7da1b19d30a0>, <ast.Subscript object at 0x7da1b19d05b0>, <ast.Subscript object at 0x7da1b19d3820>]]]]]
if compare[name[packet_type] equal[==] constant[IP]] begin[:]
call[name[print], parameter[binary_operation[constant[-- Frag(df:%d mf:%d offset:%d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1800790>, <ast.Subscript object at 0x7da1b1803b50>, <ast.Subscript object at 0x7da1b1800520>]]]]]
if call[name[item]][constant[transport]] begin[:]
variable[transport_info] assign[=] call[name[item]][constant[transport]]
call[name[print], parameter[binary_operation[constant[Transport: %s ] <ast.Mod object at 0x7da2590d6920> call[name[transport_info]][constant[type]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b18034f0>, <ast.Name object at 0x7da1b18029e0>]]] in starred[call[name[compat].iteritems, parameter[name[transport_info]]]] begin[:]
if compare[name[key] not_equal[!=] constant[data]] begin[:]
call[name[print], parameter[binary_operation[binary_operation[name[key] + constant[:]] + call[name[repr], parameter[name[value]]]]]]
variable[data] assign[=] call[name[transport_info]][constant[data]]
call[name[print], parameter[binary_operation[constant[
Data: %d bytes] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[data]]]]]]
if name[data] begin[:]
call[name[print], parameter[binary_operation[constant[(%s...)] <ast.Mod object at 0x7da2590d6920> call[call[name[repr], parameter[name[data]]]][<ast.Slice object at 0x7da1b1802650>]]]]
if call[name[item]][constant[application]] begin[:]
call[name[print], parameter[binary_operation[constant[Application: %s] <ast.Mod object at 0x7da2590d6920> call[call[name[item]][constant[application]]][constant[type]]]]]
call[name[print], parameter[call[name[str], parameter[call[name[item]][constant[application]]]]]]
if compare[constant[src_domain] in name[packet]] begin[:]
call[name[print], parameter[binary_operation[constant[Domains: %s --> %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1a1f2b0>, <ast.Subscript object at 0x7da1b1a1fdc0>]]]]]
if compare[constant[tags] in name[item]] begin[:]
call[name[print], parameter[call[name[list], parameter[call[name[item]][constant[tags]]]]]]
call[name[print], parameter[]] | keyword[def] identifier[pull] ( identifier[self] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[input_stream] :
identifier[print] ( literal[string] % identifier[item] [ literal[string] ])
identifier[print] ( literal[string] %( identifier[net_utils] . identifier[mac_to_str] ( identifier[item] [ literal[string] ][ literal[string] ]), identifier[net_utils] . identifier[mac_to_str] ( identifier[item] [ literal[string] ][ literal[string] ]), identifier[item] [ literal[string] ][ literal[string] ]))
identifier[packet_type] = identifier[item] [ literal[string] ][ literal[string] ]
identifier[print] ( literal[string] % identifier[packet_type] , identifier[end] = literal[string] )
identifier[packet] = identifier[item] [ literal[string] ]
keyword[if] identifier[packet_type] keyword[in] [ literal[string] , literal[string] ]:
identifier[print] ( literal[string] %( identifier[net_utils] . identifier[inet_to_str] ( identifier[packet] [ literal[string] ]), identifier[net_utils] . identifier[inet_to_str] ( identifier[packet] [ literal[string] ]),
identifier[packet] [ literal[string] ], identifier[packet] [ literal[string] ]), identifier[end] = literal[string] )
keyword[if] identifier[packet_type] == literal[string] :
identifier[print] ( literal[string] %( identifier[packet] [ literal[string] ], identifier[packet] [ literal[string] ], identifier[packet] [ literal[string] ]))
keyword[else] :
identifier[print] ()
keyword[else] :
identifier[print] ( identifier[str] ( identifier[packet] ))
keyword[if] identifier[item] [ literal[string] ]:
identifier[transport_info] = identifier[item] [ literal[string] ]
identifier[print] ( literal[string] % identifier[transport_info] [ literal[string] ], identifier[end] = literal[string] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[compat] . identifier[iteritems] ( identifier[transport_info] ):
keyword[if] identifier[key] != literal[string] :
identifier[print] ( identifier[key] + literal[string] + identifier[repr] ( identifier[value] ), identifier[end] = literal[string] )
identifier[data] = identifier[transport_info] [ literal[string] ]
identifier[print] ( literal[string] % identifier[len] ( identifier[data] ), identifier[end] = literal[string] )
keyword[if] identifier[data] :
identifier[print] ( literal[string] % identifier[repr] ( identifier[data] )[: literal[int] ])
keyword[else] :
identifier[print] ()
keyword[if] identifier[item] [ literal[string] ]:
identifier[print] ( literal[string] % identifier[item] [ literal[string] ][ literal[string] ], identifier[end] = literal[string] )
identifier[print] ( identifier[str] ( identifier[item] [ literal[string] ]))
keyword[if] literal[string] keyword[in] identifier[packet] :
identifier[print] ( literal[string] %( identifier[packet] [ literal[string] ], identifier[packet] [ literal[string] ]))
keyword[if] literal[string] keyword[in] identifier[item] :
identifier[print] ( identifier[list] ( identifier[item] [ literal[string] ]))
identifier[print] () | def pull(self):
"""Print out information about each packet from the input_stream"""
# For each packet in the pcap process the contents
for item in self.input_stream:
# Print out the timestamp in UTC
print('Timestamp: %s' % item['timestamp'])
# Unpack the Ethernet frame (mac src/dst, ethertype)
print('Ethernet Frame: %s --> %s (type: %d)' % (net_utils.mac_to_str(item['eth']['src']), net_utils.mac_to_str(item['eth']['dst']), item['eth']['type']))
# Print out the Packet info
packet_type = item['packet']['type']
print('Packet: %s ' % packet_type, end='')
packet = item['packet']
if packet_type in ['IP', 'IP6']:
print('%s --> %s (len:%d ttl:%d)' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst']), packet['len'], packet['ttl']), end='')
if packet_type == 'IP':
print('-- Frag(df:%d mf:%d offset:%d)' % (packet['df'], packet['mf'], packet['offset'])) # depends on [control=['if'], data=[]]
else:
print() # depends on [control=['if'], data=['packet_type']]
else:
print(str(packet))
# Print out transport and application layers
if item['transport']:
transport_info = item['transport']
print('Transport: %s ' % transport_info['type'], end='')
for (key, value) in compat.iteritems(transport_info):
if key != 'data':
print(key + ':' + repr(value), end=' ') # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=[]]
# Give summary info about data
data = transport_info['data']
print('\nData: %d bytes' % len(data), end='')
if data:
print('(%s...)' % repr(data)[:30]) # depends on [control=['if'], data=[]]
else:
print() # depends on [control=['if'], data=[]]
# Application data
if item['application']:
print('Application: %s' % item['application']['type'], end='')
print(str(item['application'])) # depends on [control=['if'], data=[]]
# Is there domain info?
if 'src_domain' in packet:
print('Domains: %s --> %s' % (packet['src_domain'], packet['dst_domain'])) # depends on [control=['if'], data=['packet']]
# Tags
if 'tags' in item:
print(list(item['tags'])) # depends on [control=['if'], data=['item']]
print() # depends on [control=['for'], data=['item']] |
def ght_alternative(img, template, indices):
    """
    Alternative implementation of the general hough transform, which uses iteration over
    indices rather than broadcasting rules like `ght`.
    It is therefore considerably slower, especially for large, multi-dimensional arrays.
    The only application are cases, where the hough transform should only be computed for
    a small number of points (=template centers) in the image. In this case the indices
    of interest can be provided as a list.
    Parameters
    ----------
    img : array_like
        The image in which to search for the structure.
    template : array_like
        A boolean array containing the structure to search for.
    indices : sequences
        A sequence of image indices at which to compute the hough transform.
    Returns
    -------
    hough_transform : ndarray
        The general hough transformation image.
    Raises
    ------
    AttributeError
        If image and template dimensionality differ, or the template is
        larger than the image along any axis.
    """
    # cast template to bool and img to numpy array
    img = numpy.asarray(img)
    # NOTE: use the builtin ``bool`` -- the ``numpy.bool`` alias was
    # deprecated in NumPy 1.20 and removed in NumPy 1.24.
    template = numpy.asarray(template).astype(bool)

    # check supplied parameters
    if img.ndim != template.ndim:
        raise AttributeError('The supplied image and template must be of the same dimensionality.')
    if not numpy.all(numpy.greater_equal(img.shape, template.shape)):
        raise AttributeError('The supplied template is bigger than the image. This setting makes no sense for a hough transform.')

    # pad the original image so the template window fits at every index
    img_padded = pad(img, footprint=template, mode='constant')

    # prepare the hough image; boolean inputs accumulate counts, so they
    # need an integer output dtype
    if bool == img.dtype:
        img_hough = numpy.zeros(img.shape, numpy.int32)
    else:
        img_hough = numpy.zeros(img.shape, img.dtype)

    # iterate over the pixels, apply the template center to each of these
    # and save the sum into the hough image
    for idx_hough in indices:
        idx_hough = tuple(idx_hough)
        # Indexing must use a *tuple* of slices: indexing an ndarray with a
        # list of slices was deprecated in NumPy 1.15 and is an error in
        # modern NumPy.
        window = tuple(slice(idx, None) for idx in idx_hough)
        # ndarray.sum() instead of builtin sum(): same result, one C call.
        img_hough[idx_hough] = img_padded[window][template].sum()
    return img_hough
constant[
Alternative implementation of the general hough transform, which uses iteration over
indices rather than broadcasting rules like `ght`.
It is therefore considerably slower, especially for large, multi-dimensional arrays.
The only application are cases, where the hough transform should only be computed for
a small number of points (=template centers) in the image. In this case the indices
of interest can be provided as a list.
Parameters
----------
img : array_like
The image in which to search for the structure.
template : array_like
A boolean array containing the structure to search for.
indices : sequences
A sequence of image indices at which to compute the hough transform.
Returns
-------
hough_transform : ndarray
The general hough transformation image.
]
variable[img] assign[=] call[name[numpy].asarray, parameter[name[img]]]
variable[template] assign[=] call[call[name[numpy].asarray, parameter[name[template]]].astype, parameter[name[numpy].bool]]
if compare[name[img].ndim not_equal[!=] name[template].ndim] begin[:]
<ast.Raise object at 0x7da2044c1b10>
if <ast.UnaryOp object at 0x7da2044c26b0> begin[:]
<ast.Raise object at 0x7da2044c1150>
variable[img_padded] assign[=] call[name[pad], parameter[name[img]]]
if compare[name[numpy].bool equal[==] name[img].dtype] begin[:]
variable[img_hough] assign[=] call[name[numpy].zeros, parameter[name[img].shape, name[numpy].int32]]
for taget[name[idx_hough]] in starred[name[indices]] begin[:]
variable[idx_hough] assign[=] call[name[tuple], parameter[name[idx_hough]]]
variable[slices_img_padded] assign[=] <ast.ListComp object at 0x7da1b12b82e0>
call[name[img_hough]][name[idx_hough]] assign[=] call[name[sum], parameter[call[call[name[img_padded]][name[slices_img_padded]]][name[template]]]]
return[name[img_hough]] | keyword[def] identifier[ght_alternative] ( identifier[img] , identifier[template] , identifier[indices] ):
literal[string]
identifier[img] = identifier[numpy] . identifier[asarray] ( identifier[img] )
identifier[template] = identifier[numpy] . identifier[asarray] ( identifier[template] ). identifier[astype] ( identifier[numpy] . identifier[bool] )
keyword[if] identifier[img] . identifier[ndim] != identifier[template] . identifier[ndim] :
keyword[raise] identifier[AttributeError] ( literal[string] )
keyword[if] keyword[not] identifier[numpy] . identifier[all] ( identifier[numpy] . identifier[greater_equal] ( identifier[img] . identifier[shape] , identifier[template] . identifier[shape] )):
keyword[raise] identifier[AttributeError] ( literal[string] )
identifier[img_padded] = identifier[pad] ( identifier[img] , identifier[footprint] = identifier[template] , identifier[mode] = literal[string] )
keyword[if] identifier[numpy] . identifier[bool] == identifier[img] . identifier[dtype] :
identifier[img_hough] = identifier[numpy] . identifier[zeros] ( identifier[img] . identifier[shape] , identifier[numpy] . identifier[int32] )
keyword[else] :
identifier[img_hough] = identifier[numpy] . identifier[zeros] ( identifier[img] . identifier[shape] , identifier[img] . identifier[dtype] )
keyword[for] identifier[idx_hough] keyword[in] identifier[indices] :
identifier[idx_hough] = identifier[tuple] ( identifier[idx_hough] )
identifier[slices_img_padded] =[ identifier[slice] ( identifier[idx_hough] [ identifier[i] ], keyword[None] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[img_hough] . identifier[ndim] )]
identifier[img_hough] [ identifier[idx_hough] ]= identifier[sum] ( identifier[img_padded] [ identifier[slices_img_padded] ][ identifier[template] ])
keyword[return] identifier[img_hough] | def ght_alternative(img, template, indices):
"""
Alternative implementation of the general hough transform, which uses iteration over
indices rather than broadcasting rules like `ght`.
It is therefore considerably slower, especially for large, multi-dimensional arrays.
The only application are cases, where the hough transform should only be computed for
a small number of points (=template centers) in the image. In this case the indices
of interest can be provided as a list.
Parameters
----------
img : array_like
The image in which to search for the structure.
template : array_like
A boolean array containing the structure to search for.
indices : sequences
A sequence of image indices at which to compute the hough transform.
Returns
-------
hough_transform : ndarray
The general hough transformation image.
"""
# cast template to bool and img to numpy array
img = numpy.asarray(img)
template = numpy.asarray(template).astype(numpy.bool)
# check supplied parameters
if img.ndim != template.ndim:
raise AttributeError('The supplied image and template must be of the same dimensionality.') # depends on [control=['if'], data=[]]
if not numpy.all(numpy.greater_equal(img.shape, template.shape)):
raise AttributeError('The supplied template is bigger than the image. This setting makes no sense for a hough transform.') # depends on [control=['if'], data=[]]
# pad the original image
img_padded = pad(img, footprint=template, mode='constant')
# prepare the hough image
if numpy.bool == img.dtype:
img_hough = numpy.zeros(img.shape, numpy.int32) # depends on [control=['if'], data=[]]
else:
img_hough = numpy.zeros(img.shape, img.dtype)
# iterate over the pixels, apply the template center to each of these and save the sum into the hough image
for idx_hough in indices:
idx_hough = tuple(idx_hough)
slices_img_padded = [slice(idx_hough[i], None) for i in range(img_hough.ndim)]
img_hough[idx_hough] = sum(img_padded[slices_img_padded][template]) # depends on [control=['for'], data=['idx_hough']]
return img_hough |
def pkg_commit_hash(pkg_path):
    """Get short form of commit hash given directory `pkg_path`
    We get the commit hash from (in order of preference):
    * IPython.utils._sysinfo.commit
    * git output, if we are in a git repository
    If these fail, we return a not-found placeholder tuple
    Parameters
    ----------
    pkg_path : str
       directory containing package
       only used for getting commit from active repo
    Returns
    -------
    hash_from : str
       Where we got the hash from - description
    hash_str : str
       short form of hash
    """
    # Try and get commit from written commit text file
    if _sysinfo.commit:
        return "installation", _sysinfo.commit
    # maybe we are in a repository; pass the command as an argv list with
    # shell=False so no shell is spawned and the string cannot be mangled
    # by shell metacharacters
    proc = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            cwd=pkg_path)
    repo_commit, _ = proc.communicate()
    if repo_commit:
        # communicate() yields bytes; decode so this branch honours the
        # documented `str` return type, matching the other branches
        return 'repository', repo_commit.strip().decode('ascii', 'replace')
    return '(none found)', '<not found>'
constant[Get short form of commit hash given directory `pkg_path`
We get the commit hash from (in order of preference):
* IPython.utils._sysinfo.commit
* git output, if we are in a git repository
If these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
only used for getting commit from active repo
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
]
if name[_sysinfo].commit begin[:]
return[tuple[[<ast.Constant object at 0x7da20e9b1690>, <ast.Attribute object at 0x7da20e9b2320>]]]
variable[proc] assign[=] call[name[subprocess].Popen, parameter[constant[git rev-parse --short HEAD]]]
<ast.Tuple object at 0x7da18ede5330> assign[=] call[name[proc].communicate, parameter[]]
if name[repo_commit] begin[:]
return[tuple[[<ast.Constant object at 0x7da18ede5450>, <ast.Call object at 0x7da18ede6110>]]]
return[tuple[[<ast.Constant object at 0x7da18ede4100>, <ast.Constant object at 0x7da18ede4be0>]]] | keyword[def] identifier[pkg_commit_hash] ( identifier[pkg_path] ):
literal[string]
keyword[if] identifier[_sysinfo] . identifier[commit] :
keyword[return] literal[string] , identifier[_sysinfo] . identifier[commit]
identifier[proc] = identifier[subprocess] . identifier[Popen] ( literal[string] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] ,
identifier[cwd] = identifier[pkg_path] , identifier[shell] = keyword[True] )
identifier[repo_commit] , identifier[_] = identifier[proc] . identifier[communicate] ()
keyword[if] identifier[repo_commit] :
keyword[return] literal[string] , identifier[repo_commit] . identifier[strip] ()
keyword[return] literal[string] , literal[string] | def pkg_commit_hash(pkg_path):
"""Get short form of commit hash given directory `pkg_path`
We get the commit hash from (in order of preference):
* IPython.utils._sysinfo.commit
* git output, if we are in a git repository
If these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
only used for getting commit from active repo
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
"""
# Try and get commit from written commit text file
if _sysinfo.commit:
return ('installation', _sysinfo.commit) # depends on [control=['if'], data=[]]
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=pkg_path, shell=True)
(repo_commit, _) = proc.communicate()
if repo_commit:
return ('repository', repo_commit.strip()) # depends on [control=['if'], data=[]]
return ('(none found)', '<not found>') |
def _import_bibdoc():
    """Import BibDocFile.

    Returns the ``(BibRecDocs, BibDoc)`` classes from whichever Invenio
    module layout is importable, so callers stay agnostic of the installed
    Invenio version.
    """
    try:
        # Preferred import path (flat ``invenio.bibdocfile`` layout).
        from invenio.bibdocfile import BibRecDocs, BibDoc
    except ImportError:
        # Fallback layout under ``invenio.legacy`` -- NOTE(review): presumably
        # the newer Invenio package structure; confirm against the targeted
        # Invenio release.
        from invenio.legacy.bibdocfile.api import BibRecDocs, BibDoc
    return BibRecDocs, BibDoc
constant[Import BibDocFile.]
<ast.Try object at 0x7da1b0136620>
return[tuple[[<ast.Name object at 0x7da1b0135ba0>, <ast.Name object at 0x7da1b01373d0>]]] | keyword[def] identifier[_import_bibdoc] ():
literal[string]
keyword[try] :
keyword[from] identifier[invenio] . identifier[bibdocfile] keyword[import] identifier[BibRecDocs] , identifier[BibDoc]
keyword[except] identifier[ImportError] :
keyword[from] identifier[invenio] . identifier[legacy] . identifier[bibdocfile] . identifier[api] keyword[import] identifier[BibRecDocs] , identifier[BibDoc]
keyword[return] identifier[BibRecDocs] , identifier[BibDoc] | def _import_bibdoc():
"""Import BibDocFile."""
try:
from invenio.bibdocfile import BibRecDocs, BibDoc # depends on [control=['try'], data=[]]
except ImportError:
from invenio.legacy.bibdocfile.api import BibRecDocs, BibDoc # depends on [control=['except'], data=[]]
return (BibRecDocs, BibDoc) |
def transit_export_key(self, name, key_type, version=None, mount_point='transit'):
    """GET /<mount_point>/export/<key_type>/<name>(/<version>)

    Export the named key from the transit secrets backend and return the
    decoded JSON response.

    :param name: Name of the key to export.
    :type name:
    :param key_type: Type of key material to export.
    :type key_type:
    :param version: Optional key version; when omitted, all versions are
        requested (no version path segment is appended).
    :type version:
    :param mount_point: Mount point of the transit backend.
    :type mount_point:
    :return: Parsed JSON body of the response.
    :rtype:
    """
    base = '/v1/{0}/export/{1}/{2}'.format(mount_point, key_type, name)
    if version is None:
        url = base
    else:
        url = '{0}/{1}'.format(base, version)
    response = self._adapter.get(url)
    return response.json()
constant[GET /<mount_point>/export/<key_type>/<name>(/<version>)
:param name:
:type name:
:param key_type:
:type key_type:
:param version:
:type version:
:param mount_point:
:type mount_point:
:return:
:rtype:
]
if compare[name[version] is_not constant[None]] begin[:]
variable[url] assign[=] call[constant[/v1/{0}/export/{1}/{2}/{3}].format, parameter[name[mount_point], name[key_type], name[name], name[version]]]
return[call[call[name[self]._adapter.get, parameter[name[url]]].json, parameter[]]] | keyword[def] identifier[transit_export_key] ( identifier[self] , identifier[name] , identifier[key_type] , identifier[version] = keyword[None] , identifier[mount_point] = literal[string] ):
literal[string]
keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] :
identifier[url] = literal[string] . identifier[format] ( identifier[mount_point] , identifier[key_type] , identifier[name] , identifier[version] )
keyword[else] :
identifier[url] = literal[string] . identifier[format] ( identifier[mount_point] , identifier[key_type] , identifier[name] )
keyword[return] identifier[self] . identifier[_adapter] . identifier[get] ( identifier[url] ). identifier[json] () | def transit_export_key(self, name, key_type, version=None, mount_point='transit'):
"""GET /<mount_point>/export/<key_type>/<name>(/<version>)
:param name:
:type name:
:param key_type:
:type key_type:
:param version:
:type version:
:param mount_point:
:type mount_point:
:return:
:rtype:
"""
if version is not None:
url = '/v1/{0}/export/{1}/{2}/{3}'.format(mount_point, key_type, name, version) # depends on [control=['if'], data=['version']]
else:
url = '/v1/{0}/export/{1}/{2}'.format(mount_point, key_type, name)
return self._adapter.get(url).json() |
def register_domain(DomainName=None, IdnLangCode=None, DurationInYears=None, AutoRenew=None, AdminContact=None, RegistrantContact=None, TechContact=None, PrivacyProtectAdminContact=None, PrivacyProtectRegistrantContact=None, PrivacyProtectTechContact=None):
"""
This operation registers a domain. Domains are registered by the AWS registrar partner, Gandi. For some top-level domains (TLDs), this operation requires extra parameters.
When you register a domain, Amazon Route 53 does the following:
See also: AWS API Documentation
:example: response = client.register_domain(
DomainName='string',
IdnLangCode='string',
DurationInYears=123,
AutoRenew=True|False,
AdminContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
RegistrantContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
TechContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
PrivacyProtectAdminContact=True|False,
PrivacyProtectRegistrantContact=True|False,
PrivacyProtectTechContact=True|False
)
:type DomainName: string
:param DomainName: [REQUIRED]
The domain name that you want to register.
Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.
:type IdnLangCode: string
:param IdnLangCode: Reserved for future use.
:type DurationInYears: integer
:param DurationInYears: [REQUIRED]
The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide .
Default: 1
:type AutoRenew: boolean
:param AutoRenew: Indicates whether the domain will be automatically renewed (true ) or not (false ). Autorenewal only takes effect after the account is charged.
Default: true
:type AdminContact: dict
:param AdminContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type RegistrantContact: dict
:param RegistrantContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type TechContact: dict
:param TechContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type PrivacyProtectAdminContact: boolean
:param PrivacyProtectAdminContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:type PrivacyProtectRegistrantContact: boolean
:param PrivacyProtectRegistrantContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:type PrivacyProtectTechContact: boolean
:param PrivacyProtectTechContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:rtype: dict
:return: {
'OperationId': 'string'
}
:returns:
DomainName (string) -- [REQUIRED]
The domain name that you want to register.
Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.
IdnLangCode (string) -- Reserved for future use.
DurationInYears (integer) -- [REQUIRED]
The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide .
Default: 1
AutoRenew (boolean) -- Indicates whether the domain will be automatically renewed (true ) or not (false ). Autorenewal only takes effect after the account is charged.
Default: true
AdminContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
RegistrantContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
TechContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
PrivacyProtectAdminContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
PrivacyProtectRegistrantContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
PrivacyProtectTechContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
"""
pass | def function[register_domain, parameter[DomainName, IdnLangCode, DurationInYears, AutoRenew, AdminContact, RegistrantContact, TechContact, PrivacyProtectAdminContact, PrivacyProtectRegistrantContact, PrivacyProtectTechContact]]:
constant[
This operation registers a domain. Domains are registered by the AWS registrar partner, Gandi. For some top-level domains (TLDs), this operation requires extra parameters.
When you register a domain, Amazon Route 53 does the following:
See also: AWS API Documentation
:example: response = client.register_domain(
DomainName='string',
IdnLangCode='string',
DurationInYears=123,
AutoRenew=True|False,
AdminContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
RegistrantContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
TechContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
PrivacyProtectAdminContact=True|False,
PrivacyProtectRegistrantContact=True|False,
PrivacyProtectTechContact=True|False
)
:type DomainName: string
:param DomainName: [REQUIRED]
The domain name that you want to register.
Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.
:type IdnLangCode: string
:param IdnLangCode: Reserved for future use.
:type DurationInYears: integer
:param DurationInYears: [REQUIRED]
The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide .
Default: 1
:type AutoRenew: boolean
:param AutoRenew: Indicates whether the domain will be automatically renewed (true ) or not (false ). Autorenewal only takes effect after the account is charged.
Default: true
:type AdminContact: dict
:param AdminContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type RegistrantContact: dict
:param RegistrantContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type TechContact: dict
:param TechContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type PrivacyProtectAdminContact: boolean
:param PrivacyProtectAdminContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:type PrivacyProtectRegistrantContact: boolean
:param PrivacyProtectRegistrantContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:type PrivacyProtectTechContact: boolean
:param PrivacyProtectTechContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:rtype: dict
:return: {
'OperationId': 'string'
}
:returns:
DomainName (string) -- [REQUIRED]
The domain name that you want to register.
Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.
IdnLangCode (string) -- Reserved for future use.
DurationInYears (integer) -- [REQUIRED]
The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide .
Default: 1
AutoRenew (boolean) -- Indicates whether the domain will be automatically renewed (true ) or not (false ). Autorenewal only takes effect after the account is charged.
Default: true
AdminContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
RegistrantContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
TechContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
PrivacyProtectAdminContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
PrivacyProtectRegistrantContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
PrivacyProtectTechContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
]
pass | keyword[def] identifier[register_domain] ( identifier[DomainName] = keyword[None] , identifier[IdnLangCode] = keyword[None] , identifier[DurationInYears] = keyword[None] , identifier[AutoRenew] = keyword[None] , identifier[AdminContact] = keyword[None] , identifier[RegistrantContact] = keyword[None] , identifier[TechContact] = keyword[None] , identifier[PrivacyProtectAdminContact] = keyword[None] , identifier[PrivacyProtectRegistrantContact] = keyword[None] , identifier[PrivacyProtectTechContact] = keyword[None] ):
literal[string]
keyword[pass] | def register_domain(DomainName=None, IdnLangCode=None, DurationInYears=None, AutoRenew=None, AdminContact=None, RegistrantContact=None, TechContact=None, PrivacyProtectAdminContact=None, PrivacyProtectRegistrantContact=None, PrivacyProtectTechContact=None):
"""
This operation registers a domain. Domains are registered by the AWS registrar partner, Gandi. For some top-level domains (TLDs), this operation requires extra parameters.
When you register a domain, Amazon Route 53 does the following:
See also: AWS API Documentation
:example: response = client.register_domain(
DomainName='string',
IdnLangCode='string',
DurationInYears=123,
AutoRenew=True|False,
AdminContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
RegistrantContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
TechContact={
'FirstName': 'string',
'LastName': 'string',
'ContactType': 'PERSON'|'COMPANY'|'ASSOCIATION'|'PUBLIC_BODY'|'RESELLER',
'OrganizationName': 'string',
'AddressLine1': 'string',
'AddressLine2': 'string',
'City': 'string',
'State': 'string',
'CountryCode': 'AD'|'AE'|'AF'|'AG'|'AI'|'AL'|'AM'|'AN'|'AO'|'AQ'|'AR'|'AS'|'AT'|'AU'|'AW'|'AZ'|'BA'|'BB'|'BD'|'BE'|'BF'|'BG'|'BH'|'BI'|'BJ'|'BL'|'BM'|'BN'|'BO'|'BR'|'BS'|'BT'|'BW'|'BY'|'BZ'|'CA'|'CC'|'CD'|'CF'|'CG'|'CH'|'CI'|'CK'|'CL'|'CM'|'CN'|'CO'|'CR'|'CU'|'CV'|'CX'|'CY'|'CZ'|'DE'|'DJ'|'DK'|'DM'|'DO'|'DZ'|'EC'|'EE'|'EG'|'ER'|'ES'|'ET'|'FI'|'FJ'|'FK'|'FM'|'FO'|'FR'|'GA'|'GB'|'GD'|'GE'|'GH'|'GI'|'GL'|'GM'|'GN'|'GQ'|'GR'|'GT'|'GU'|'GW'|'GY'|'HK'|'HN'|'HR'|'HT'|'HU'|'ID'|'IE'|'IL'|'IM'|'IN'|'IQ'|'IR'|'IS'|'IT'|'JM'|'JO'|'JP'|'KE'|'KG'|'KH'|'KI'|'KM'|'KN'|'KP'|'KR'|'KW'|'KY'|'KZ'|'LA'|'LB'|'LC'|'LI'|'LK'|'LR'|'LS'|'LT'|'LU'|'LV'|'LY'|'MA'|'MC'|'MD'|'ME'|'MF'|'MG'|'MH'|'MK'|'ML'|'MM'|'MN'|'MO'|'MP'|'MR'|'MS'|'MT'|'MU'|'MV'|'MW'|'MX'|'MY'|'MZ'|'NA'|'NC'|'NE'|'NG'|'NI'|'NL'|'NO'|'NP'|'NR'|'NU'|'NZ'|'OM'|'PA'|'PE'|'PF'|'PG'|'PH'|'PK'|'PL'|'PM'|'PN'|'PR'|'PT'|'PW'|'PY'|'QA'|'RO'|'RS'|'RU'|'RW'|'SA'|'SB'|'SC'|'SD'|'SE'|'SG'|'SH'|'SI'|'SK'|'SL'|'SM'|'SN'|'SO'|'SR'|'ST'|'SV'|'SY'|'SZ'|'TC'|'TD'|'TG'|'TH'|'TJ'|'TK'|'TL'|'TM'|'TN'|'TO'|'TR'|'TT'|'TV'|'TW'|'TZ'|'UA'|'UG'|'US'|'UY'|'UZ'|'VA'|'VC'|'VE'|'VG'|'VI'|'VN'|'VU'|'WF'|'WS'|'YE'|'YT'|'ZA'|'ZM'|'ZW',
'ZipCode': 'string',
'PhoneNumber': 'string',
'Email': 'string',
'Fax': 'string',
'ExtraParams': [
{
'Name': 'DUNS_NUMBER'|'BRAND_NUMBER'|'BIRTH_DEPARTMENT'|'BIRTH_DATE_IN_YYYY_MM_DD'|'BIRTH_COUNTRY'|'BIRTH_CITY'|'DOCUMENT_NUMBER'|'AU_ID_NUMBER'|'AU_ID_TYPE'|'CA_LEGAL_TYPE'|'CA_BUSINESS_ENTITY_TYPE'|'ES_IDENTIFICATION'|'ES_IDENTIFICATION_TYPE'|'ES_LEGAL_FORM'|'FI_BUSINESS_NUMBER'|'FI_ID_NUMBER'|'IT_PIN'|'RU_PASSPORT_DATA'|'SE_ID_NUMBER'|'SG_ID_NUMBER'|'VAT_NUMBER',
'Value': 'string'
},
]
},
PrivacyProtectAdminContact=True|False,
PrivacyProtectRegistrantContact=True|False,
PrivacyProtectTechContact=True|False
)
:type DomainName: string
:param DomainName: [REQUIRED]
The domain name that you want to register.
Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.
:type IdnLangCode: string
:param IdnLangCode: Reserved for future use.
:type DurationInYears: integer
:param DurationInYears: [REQUIRED]
The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide .
Default: 1
:type AutoRenew: boolean
:param AutoRenew: Indicates whether the domain will be automatically renewed (true ) or not (false ). Autorenewal only takes effect after the account is charged.
Default: true
:type AdminContact: dict
:param AdminContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type RegistrantContact: dict
:param RegistrantContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type TechContact: dict
:param TechContact: [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format '+[country dialing code].[number including any area code]'. For example, a US phone number might appear as '+1.1234567890' .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
:type PrivacyProtectAdminContact: boolean
:param PrivacyProtectAdminContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:type PrivacyProtectRegistrantContact: boolean
:param PrivacyProtectRegistrantContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:type PrivacyProtectTechContact: boolean
:param PrivacyProtectTechContact: Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ('who is') queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
:rtype: dict
:return: {
'OperationId': 'string'
}
:returns:
DomainName (string) -- [REQUIRED]
The domain name that you want to register.
Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.
IdnLangCode (string) -- Reserved for future use.
DurationInYears (integer) -- [REQUIRED]
The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide .
Default: 1
AutoRenew (boolean) -- Indicates whether the domain will be automatically renewed (true ) or not (false ). Autorenewal only takes effect after the account is charged.
Default: true
AdminContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
RegistrantContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
TechContact (dict) -- [REQUIRED]
Provides detailed contact information.
FirstName (string) --First name of contact.
LastName (string) --Last name of contact.
ContactType (string) --Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON , you must enter an organization name, and you can't enable privacy protection for the contact.
OrganizationName (string) --Name of the organization for contact types other than PERSON .
AddressLine1 (string) --First line of the contact's address.
AddressLine2 (string) --Second line of contact's address, if any.
City (string) --The city of the contact's address.
State (string) --The state or province of the contact's city.
CountryCode (string) --Code for the country of the contact's address.
ZipCode (string) --The zip or postal code of the contact's address.
PhoneNumber (string) --The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
Email (string) --Email address of the contact.
Fax (string) --Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country dialing code].[number including any area code]". For example, a US phone number might appear as "+1.1234567890" .
ExtraParams (list) --A list of name-value pairs for parameters required by certain top-level domains.
(dict) --ExtraParam includes the following elements.
Name (string) -- [REQUIRED]Name of the additional parameter required by the top-level domain.
Value (string) -- [REQUIRED]Values corresponding to the additional parameter names required by some top-level domains.
PrivacyProtectAdminContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
PrivacyProtectRegistrantContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
PrivacyProtectTechContact (boolean) -- Whether you want to conceal contact information from WHOIS queries. If you specify true , WHOIS ("who is") queries will return contact information for our registrar partner, Gandi, instead of the contact information that you enter.
Default: true
"""
pass |
def channels_to_byte(self, channels):
"""
:return: int
"""
# pylint: disable-msg=R0201
assert isinstance(channels, list)
result = 0
for offset in range(0, 8):
if offset + 1 in channels:
result = result + (1 << offset)
return result | def function[channels_to_byte, parameter[self, channels]]:
constant[
:return: int
]
assert[call[name[isinstance], parameter[name[channels], name[list]]]]
variable[result] assign[=] constant[0]
for taget[name[offset]] in starred[call[name[range], parameter[constant[0], constant[8]]]] begin[:]
if compare[binary_operation[name[offset] + constant[1]] in name[channels]] begin[:]
variable[result] assign[=] binary_operation[name[result] + binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[offset]]]
return[name[result]] | keyword[def] identifier[channels_to_byte] ( identifier[self] , identifier[channels] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[channels] , identifier[list] )
identifier[result] = literal[int]
keyword[for] identifier[offset] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[if] identifier[offset] + literal[int] keyword[in] identifier[channels] :
identifier[result] = identifier[result] +( literal[int] << identifier[offset] )
keyword[return] identifier[result] | def channels_to_byte(self, channels):
"""
:return: int
""" # pylint: disable-msg=R0201
assert isinstance(channels, list)
result = 0
for offset in range(0, 8):
if offset + 1 in channels:
result = result + (1 << offset) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['offset']]
return result |
def get_step_index(self, step=None):
    """Return the position of `step` within the wizard's form list.

    Falls back to the currently active step when `step` is not supplied.
    """
    target = self.steps.current if step is None else step
    return self.get_form_list().keyOrder.index(target)
constant[
Returns the index for the given `step` name. If no step is given,
the current step will be used to get the index.
]
if compare[name[step] is constant[None]] begin[:]
variable[step] assign[=] name[self].steps.current
return[call[call[name[self].get_form_list, parameter[]].keyOrder.index, parameter[name[step]]]] | keyword[def] identifier[get_step_index] ( identifier[self] , identifier[step] = keyword[None] ):
literal[string]
keyword[if] identifier[step] keyword[is] keyword[None] :
identifier[step] = identifier[self] . identifier[steps] . identifier[current]
keyword[return] identifier[self] . identifier[get_form_list] (). identifier[keyOrder] . identifier[index] ( identifier[step] ) | def get_step_index(self, step=None):
"""
Returns the index for the given `step` name. If no step is given,
the current step will be used to get the index.
"""
if step is None:
step = self.steps.current # depends on [control=['if'], data=['step']]
return self.get_form_list().keyOrder.index(step) |
def status_get(self, unit):
"""Return the current service status of this unit."""
raw_status, return_code = unit.run(
"status-get --format=json --include-data")
if return_code != 0:
return ("unknown", "")
status = json.loads(raw_status)
return (status["status"], status["message"]) | def function[status_get, parameter[self, unit]]:
constant[Return the current service status of this unit.]
<ast.Tuple object at 0x7da2054a5960> assign[=] call[name[unit].run, parameter[constant[status-get --format=json --include-data]]]
if compare[name[return_code] not_equal[!=] constant[0]] begin[:]
return[tuple[[<ast.Constant object at 0x7da2054a4e50>, <ast.Constant object at 0x7da2054a6b60>]]]
variable[status] assign[=] call[name[json].loads, parameter[name[raw_status]]]
return[tuple[[<ast.Subscript object at 0x7da2054a6c80>, <ast.Subscript object at 0x7da2054a5a50>]]] | keyword[def] identifier[status_get] ( identifier[self] , identifier[unit] ):
literal[string]
identifier[raw_status] , identifier[return_code] = identifier[unit] . identifier[run] (
literal[string] )
keyword[if] identifier[return_code] != literal[int] :
keyword[return] ( literal[string] , literal[string] )
identifier[status] = identifier[json] . identifier[loads] ( identifier[raw_status] )
keyword[return] ( identifier[status] [ literal[string] ], identifier[status] [ literal[string] ]) | def status_get(self, unit):
"""Return the current service status of this unit."""
(raw_status, return_code) = unit.run('status-get --format=json --include-data')
if return_code != 0:
return ('unknown', '') # depends on [control=['if'], data=[]]
status = json.loads(raw_status)
return (status['status'], status['message']) |
def format_time(date_obj, time_obj=None, datebox=False, dt_type=None, classes=None):
    """
    Return a template-context dict for rendering HTML5 time elements
    based on the given datetime object.
    By default describes a time element, but will describe a .datebox if
    requested.
    dt_type allows passing dt_start or dt_end for hcal formatting.
    classes allows sending arbitrary classnames. Useful for properly
    microformatting elements.
    Usage::
        {% format_time obj.pub_date %}
        {% format_time obj.start_date 'datebox' 'dtstart' %}
        {% format_time obj.end_date obj.end_time 'datebox' 'dt_end' %}
    """
    if not time_obj:
        # Fall back to the date object's ``time`` attribute.  NOTE(review):
        # on datetime instances this is the bound method itself (it is not
        # called here) -- confirm the consuming template expects that.
        time_obj = getattr(date_obj, 'time', None)
    # Build the CSS class string, skipping empty values: previously a
    # classes=None default leaked the literal string "None" into the
    # rendered class attribute (e.g. "None dtstart").
    parts = [p for p in (classes, dt_type, datebox) if p]
    if dt_type or datebox:
        classes = ' '.join(str(p) for p in parts)
    return {
        'date_obj': date_obj,
        'time_obj': time_obj,
        'datebox': datebox,
        'current_year': datetime.date.today().year,
        'classes': classes
    }
constant[
Returns formatted HTML5 elements based on given datetime object.
By default returns a time element, but will return a .datebox if requested.
dt_type allows passing dt_start or dt_end for hcal formatting.
link allows passing a url to the datebox.
classes allows sending arbitrary classnames. Useful for properly microformatting elements.
Usage::
{% format_time obj.pub_date %}
{% format_time obj.start_date 'datebox' 'dtstart' %}
{% format_time obj.end_date obj.end_time 'datebox' 'dt_end' %}
]
if <ast.UnaryOp object at 0x7da20e962e30> begin[:]
variable[time_obj] assign[=] call[name[getattr], parameter[name[date_obj], constant[time], constant[None]]]
if name[dt_type] begin[:]
variable[classes] assign[=] call[constant[{0} {1}].format, parameter[name[classes], name[dt_type]]]
if name[datebox] begin[:]
variable[classes] assign[=] call[constant[{0} {1}].format, parameter[name[classes], name[datebox]]]
return[dictionary[[<ast.Constant object at 0x7da20e960070>, <ast.Constant object at 0x7da20e960550>, <ast.Constant object at 0x7da20e962d70>, <ast.Constant object at 0x7da20e9602e0>, <ast.Constant object at 0x7da20e962f80>], [<ast.Name object at 0x7da20e963700>, <ast.Name object at 0x7da20e961f60>, <ast.Name object at 0x7da20e960af0>, <ast.Attribute object at 0x7da20e9605b0>, <ast.Name object at 0x7da20e9612d0>]]] | keyword[def] identifier[format_time] ( identifier[date_obj] , identifier[time_obj] = keyword[None] , identifier[datebox] = keyword[False] , identifier[dt_type] = keyword[None] , identifier[classes] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[time_obj] :
identifier[time_obj] = identifier[getattr] ( identifier[date_obj] , literal[string] , keyword[None] )
keyword[if] identifier[dt_type] :
identifier[classes] = literal[string] . identifier[format] ( identifier[classes] , identifier[dt_type] )
keyword[if] identifier[datebox] :
identifier[classes] = literal[string] . identifier[format] ( identifier[classes] , identifier[datebox] )
keyword[return] {
literal[string] : identifier[date_obj] ,
literal[string] : identifier[time_obj] ,
literal[string] : identifier[datebox] ,
literal[string] : identifier[datetime] . identifier[date] . identifier[today] (). identifier[year] ,
literal[string] : identifier[classes]
} | def format_time(date_obj, time_obj=None, datebox=False, dt_type=None, classes=None):
"""
Returns formatted HTML5 elements based on given datetime object.
By default returns a time element, but will return a .datebox if requested.
dt_type allows passing dt_start or dt_end for hcal formatting.
link allows passing a url to the datebox.
classes allows sending arbitrary classnames. Useful for properly microformatting elements.
Usage::
{% format_time obj.pub_date %}
{% format_time obj.start_date 'datebox' 'dtstart' %}
{% format_time obj.end_date obj.end_time 'datebox' 'dt_end' %}
"""
if not time_obj:
time_obj = getattr(date_obj, 'time', None) # depends on [control=['if'], data=[]]
if dt_type:
classes = '{0} {1}'.format(classes, dt_type) # depends on [control=['if'], data=[]]
if datebox:
classes = '{0} {1}'.format(classes, datebox) # depends on [control=['if'], data=[]]
return {'date_obj': date_obj, 'time_obj': time_obj, 'datebox': datebox, 'current_year': datetime.date.today().year, 'classes': classes} |
def _define_absl_flag(self, flag_instance, suppress):
    """Registers an argparse argument for the given absl flag instance."""
    long_name = flag_instance.name
    names = ['--' + long_name]
    if flag_instance.short_name:
        # Short form goes first so it is listed before the long form.
        names.insert(0, '-' + flag_instance.short_name)
    if suppress:
        helptext = argparse.SUPPRESS
    else:
        # argparse help strings use %-formatting. Escape the literal %'s.
        helptext = flag_instance.help.replace('%', '%%')
    if flag_instance.boolean:
        # Boolean flags additionally accept a '--noNAME' long form.
        names.append('--no' + long_name)
        action = _BooleanFlagAction
    else:
        action = _FlagAction
    self.add_argument(
        *names, action=action, help=helptext,
        metavar=long_name.upper(), flag_instance=flag_instance)
flag_instance=flag_instance) | def function[_define_absl_flag, parameter[self, flag_instance, suppress]]:
constant[Defines a flag from the flag_instance.]
variable[flag_name] assign[=] name[flag_instance].name
variable[short_name] assign[=] name[flag_instance].short_name
variable[argument_names] assign[=] list[[<ast.BinOp object at 0x7da1b184c460>]]
if name[short_name] begin[:]
call[name[argument_names].insert, parameter[constant[0], binary_operation[constant[-] + name[short_name]]]]
if name[suppress] begin[:]
variable[helptext] assign[=] name[argparse].SUPPRESS
if name[flag_instance].boolean begin[:]
call[name[argument_names].append, parameter[binary_operation[constant[--no] + name[flag_name]]]]
call[name[self].add_argument, parameter[<ast.Starred object at 0x7da1b184c220>]] | keyword[def] identifier[_define_absl_flag] ( identifier[self] , identifier[flag_instance] , identifier[suppress] ):
literal[string]
identifier[flag_name] = identifier[flag_instance] . identifier[name]
identifier[short_name] = identifier[flag_instance] . identifier[short_name]
identifier[argument_names] =[ literal[string] + identifier[flag_name] ]
keyword[if] identifier[short_name] :
identifier[argument_names] . identifier[insert] ( literal[int] , literal[string] + identifier[short_name] )
keyword[if] identifier[suppress] :
identifier[helptext] = identifier[argparse] . identifier[SUPPRESS]
keyword[else] :
identifier[helptext] = identifier[flag_instance] . identifier[help] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[flag_instance] . identifier[boolean] :
identifier[argument_names] . identifier[append] ( literal[string] + identifier[flag_name] )
identifier[self] . identifier[add_argument] (
* identifier[argument_names] , identifier[action] = identifier[_BooleanFlagAction] , identifier[help] = identifier[helptext] ,
identifier[metavar] = identifier[flag_instance] . identifier[name] . identifier[upper] (),
identifier[flag_instance] = identifier[flag_instance] )
keyword[else] :
identifier[self] . identifier[add_argument] (
* identifier[argument_names] , identifier[action] = identifier[_FlagAction] , identifier[help] = identifier[helptext] ,
identifier[metavar] = identifier[flag_instance] . identifier[name] . identifier[upper] (),
identifier[flag_instance] = identifier[flag_instance] ) | def _define_absl_flag(self, flag_instance, suppress):
"""Defines a flag from the flag_instance."""
flag_name = flag_instance.name
short_name = flag_instance.short_name
argument_names = ['--' + flag_name]
if short_name:
argument_names.insert(0, '-' + short_name) # depends on [control=['if'], data=[]]
if suppress:
helptext = argparse.SUPPRESS # depends on [control=['if'], data=[]]
else:
# argparse help string uses %-formatting. Escape the literal %'s.
helptext = flag_instance.help.replace('%', '%%')
if flag_instance.boolean:
# Only add the `no` form to the long name.
argument_names.append('--no' + flag_name)
self.add_argument(*argument_names, action=_BooleanFlagAction, help=helptext, metavar=flag_instance.name.upper(), flag_instance=flag_instance) # depends on [control=['if'], data=[]]
else:
self.add_argument(*argument_names, action=_FlagAction, help=helptext, metavar=flag_instance.name.upper(), flag_instance=flag_instance) |
def update(self):
    """ Updates the user's SDB (Safety Deposit Box) inventory.
    Loops through all items on a page and checks for an item
    that has changed. A changed item is identified as the remove
    attribute being set to anything greater than 0. It will then
    update each page accordingly with the changed items.

    NOTE(review): the loop returns as soon as the *first* changed page
    is submitted, so at most one page is updated per call; when no page
    has changed the method falls through and implicitly returns None
    (falsy) -- confirm both behaviours are intended.

    Returns
        bool - True if successful, False otherwise
    """
    for x in range(1, self.inventory.pages + 1):
        if self._hasPageChanged(x):
            form = self._updateForm(x)
            # usePin forces the account PIN to accompany the submission.
            form.usePin = True
            pg = form.submit()
            # Success redirects to SDB page
            if "Your Safety Deposit Box" in pg.content:
                return True
            else:
                # NOTE(review): logger.exception() is normally meant for use
                # inside an except block; no exception is active here.
                logging.getLogger("neolib.shop").exception("Could not verify if SDB inventory was updated.", {'pg': pg})
                return False
return False | def function[update, parameter[self]]:
constant[ Upates the user's SDB inventory
Loops through all items on a page and checks for an item
that has changed. A changed item is identified as the remove
attribute being set to anything greater than 0. It will then
update each page accordingly with the changed items.
Returns
bool - True if successful, False otherwise
]
for taget[name[x]] in starred[call[name[range], parameter[constant[1], binary_operation[name[self].inventory.pages + constant[1]]]]] begin[:]
if call[name[self]._hasPageChanged, parameter[name[x]]] begin[:]
variable[form] assign[=] call[name[self]._updateForm, parameter[name[x]]]
name[form].usePin assign[=] constant[True]
variable[pg] assign[=] call[name[form].submit, parameter[]]
if compare[constant[Your Safety Deposit Box] in name[pg].content] begin[:]
return[constant[True]] | keyword[def] identifier[update] ( identifier[self] ):
literal[string]
keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[inventory] . identifier[pages] + literal[int] ):
keyword[if] identifier[self] . identifier[_hasPageChanged] ( identifier[x] ):
identifier[form] = identifier[self] . identifier[_updateForm] ( identifier[x] )
identifier[form] . identifier[usePin] = keyword[True]
identifier[pg] = identifier[form] . identifier[submit] ()
keyword[if] literal[string] keyword[in] identifier[pg] . identifier[content] :
keyword[return] keyword[True]
keyword[else] :
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[exception] ( literal[string] ,{ literal[string] : identifier[pg] })
keyword[return] keyword[False] | def update(self):
""" Upates the user's SDB inventory
Loops through all items on a page and checks for an item
that has changed. A changed item is identified as the remove
attribute being set to anything greater than 0. It will then
update each page accordingly with the changed items.
Returns
bool - True if successful, False otherwise
"""
for x in range(1, self.inventory.pages + 1):
if self._hasPageChanged(x):
form = self._updateForm(x)
form.usePin = True
pg = form.submit()
# Success redirects to SDB page
if 'Your Safety Deposit Box' in pg.content:
return True # depends on [control=['if'], data=[]]
else:
logging.getLogger('neolib.shop').exception('Could not verify if SDB inventory was updated.', {'pg': pg})
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] |
def make_simlinks(dest_dir, paths_list):
    """Create a symlink in ``dest_dir`` for each path in ``paths_list``.

    Each link is named after the source path's basename.  An existing
    symlink at the destination (including a broken one) is replaced; any
    other existing file or directory is skipped with a warning on stderr.

    :param dest_dir: directory in which the links are created
    :param paths_list: iterable of source paths to link to
    """
    for source in paths_list:
        dest = os.path.join(dest_dir, os.path.basename(source))
        # lexists() also detects broken symlinks, which plain exists()
        # misses and which would make os.symlink() below fail.
        if os.path.lexists(dest):
            if os.path.islink(dest):
                os.remove(dest)
            else:
                sys.stderr.write('A file or dir named {} already exists, skipping...\n'.format(dest))
                continue
        os.symlink(source, dest)
os.symlink(path, dest) | def function[make_simlinks, parameter[dest_dir, paths_list]]:
constant[
TODO docstrings
]
for taget[name[path]] in starred[name[paths_list]] begin[:]
variable[dest] assign[=] call[name[os].path.join, parameter[name[dest_dir], call[call[name[os].path.split, parameter[name[path]]]][<ast.UnaryOp object at 0x7da20c795750>]]]
if call[name[os].path.exists, parameter[name[dest]]] begin[:]
if call[name[os].path.islink, parameter[name[dest]]] begin[:]
call[name[os].remove, parameter[name[dest]]]
call[name[os].symlink, parameter[name[path], name[dest]]] | keyword[def] identifier[make_simlinks] ( identifier[dest_dir] , identifier[paths_list] ):
literal[string]
keyword[for] identifier[path] keyword[in] identifier[paths_list] :
identifier[dest] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest_dir] , identifier[os] . identifier[path] . identifier[split] ( identifier[path] )[- literal[int] ])
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[dest] ):
keyword[if] identifier[os] . identifier[path] . identifier[islink] ( identifier[dest] ):
identifier[os] . identifier[remove] ( identifier[dest] )
keyword[else] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] . identifier[format] ( identifier[dest] ))
keyword[continue]
identifier[os] . identifier[symlink] ( identifier[path] , identifier[dest] ) | def make_simlinks(dest_dir, paths_list):
"""
TODO docstrings
"""
for path in paths_list:
dest = os.path.join(dest_dir, os.path.split(path)[-1])
if os.path.exists(dest):
if os.path.islink(dest):
os.remove(dest) # depends on [control=['if'], data=[]]
else:
sys.stderr.write('A file or dir named {} already exists, skipping...\n'.format(dest))
continue # depends on [control=['if'], data=[]]
os.symlink(path, dest) # depends on [control=['for'], data=['path']] |
def create(self, vips):
    """Create VIP requests in batch.

    :param vips: list of vip dicts to be persisted on the database
    :return: response from the POST to the v3 vip-request endpoint
    """
    payload = {'vips': vips}
    return super(ApiVipRequest, self).post('api/v3/vip-request/', payload)
constant[
Method to create vip's
:param vips: List containing vip's desired to be created on database
:return: None
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da2047eb2e0>], [<ast.Name object at 0x7da2047e81f0>]]
return[call[call[name[super], parameter[name[ApiVipRequest], name[self]]].post, parameter[constant[api/v3/vip-request/], name[data]]]] | keyword[def] identifier[create] ( identifier[self] , identifier[vips] ):
literal[string]
identifier[data] ={ literal[string] : identifier[vips] }
keyword[return] identifier[super] ( identifier[ApiVipRequest] , identifier[self] ). identifier[post] ( literal[string] , identifier[data] ) | def create(self, vips):
"""
Method to create vip's
:param vips: List containing vip's desired to be created on database
:return: None
"""
data = {'vips': vips}
return super(ApiVipRequest, self).post('api/v3/vip-request/', data) |
def represent_as(self, new_pos, new_vel=None):
    """
    Re-express this orbit's position and velocity in another coordinate
    representation. Any of the Astropy coordinates representation
    classes is supported.
    Parameters
    ----------
    new_pos : :class:`~astropy.coordinates.BaseRepresentation`
        The type of representation to generate. Must be a class (not an
        instance), or the string name of the representation class.
    new_vel : :class:`~astropy.coordinates.BaseDifferential` (optional)
        Class in which any velocities should be represented. Must be a class
        (not an instance), or the string name of the differential class. If
        None, uses the default differential for the new position class.
    Returns
    -------
    new_orbit : `gala.dynamics.Orbit`
    """
    # Delegate the coordinate conversion to the parent class, then wrap
    # the result back up with this orbit's Hamiltonian.
    converted = super(Orbit, self).represent_as(new_pos=new_pos,
                                                new_vel=new_vel)
    return self.__class__(pos=converted.pos,
                          vel=converted.vel,
                          hamiltonian=self.hamiltonian)
constant[
Represent the position and velocity of the orbit in an alternate
coordinate system. Supports any of the Astropy coordinates
representation classes.
Parameters
----------
new_pos : :class:`~astropy.coordinates.BaseRepresentation`
The type of representation to generate. Must be a class (not an
instance), or the string name of the representation class.
new_vel : :class:`~astropy.coordinates.BaseDifferential` (optional)
Class in which any velocities should be represented. Must be a class
(not an instance), or the string name of the differential class. If
None, uses the default differential for the new position class.
Returns
-------
new_orbit : `gala.dynamics.Orbit`
]
variable[o] assign[=] call[call[name[super], parameter[name[Orbit], name[self]]].represent_as, parameter[]]
return[call[name[self].__class__, parameter[]]] | keyword[def] identifier[represent_as] ( identifier[self] , identifier[new_pos] , identifier[new_vel] = keyword[None] ):
literal[string]
identifier[o] = identifier[super] ( identifier[Orbit] , identifier[self] ). identifier[represent_as] ( identifier[new_pos] = identifier[new_pos] , identifier[new_vel] = identifier[new_vel] )
keyword[return] identifier[self] . identifier[__class__] ( identifier[pos] = identifier[o] . identifier[pos] ,
identifier[vel] = identifier[o] . identifier[vel] ,
identifier[hamiltonian] = identifier[self] . identifier[hamiltonian] ) | def represent_as(self, new_pos, new_vel=None):
"""
Represent the position and velocity of the orbit in an alternate
coordinate system. Supports any of the Astropy coordinates
representation classes.
Parameters
----------
new_pos : :class:`~astropy.coordinates.BaseRepresentation`
The type of representation to generate. Must be a class (not an
instance), or the string name of the representation class.
new_vel : :class:`~astropy.coordinates.BaseDifferential` (optional)
Class in which any velocities should be represented. Must be a class
(not an instance), or the string name of the differential class. If
None, uses the default differential for the new position class.
Returns
-------
new_orbit : `gala.dynamics.Orbit`
"""
o = super(Orbit, self).represent_as(new_pos=new_pos, new_vel=new_vel)
return self.__class__(pos=o.pos, vel=o.vel, hamiltonian=self.hamiltonian) |
def add(a, b, allow_overflow=False):
    """Adds two instances of `Money`.
    Args:
      a (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): one money
        value
      b (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): another
        money value
      allow_overflow: if True, an overflowing sum is clamped to the
        extreme representable Money value instead of raising
    Return:
      `Money`: an instance of Money
    Raises:
      ValueError: if the inputs do not have the same currency code
      OverflowError: if the sum overflows and allow_overflow is not `True`
    """
    for m in (a, b):
        if not isinstance(m, sc_messages.Money):
            raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
    if a.currencyCode != b.currencyCode:
        raise ValueError(u'Money values need the same currency to be summed')
    # _sum_nanos folds the fractional (nano) parts and reports any carry
    # into the whole-unit component.
    nano_carry, nanos_sum = _sum_nanos(a, b)
    units_sum_no_carry = a.units + b.units
    units_sum = units_sum_no_carry + nano_carry
    # Adjust when units_sum and nanos_sum have different signs so both
    # components of the result share the same sign.
    if units_sum > 0 and nanos_sum < 0:
        units_sum -= 1
        nanos_sum += _BILLION
    elif units_sum < 0 and nanos_sum > 0:
        units_sum += 1
        nanos_sum -= _BILLION
    # Return the result, detecting overflow if it occurs
    sign_a = _sign_of(a)
    sign_b = _sign_of(b)
    if sign_a > 0 and sign_b > 0 and units_sum >= _INT64_MAX:
        if not allow_overflow:
            raise OverflowError(u'Money addition positive overflow')
        else:
            # Clamp to the largest representable positive Money value.
            return sc_messages.Money(units=_INT64_MAX,
                                     nanos=MAX_NANOS,
                                     currencyCode=a.currencyCode)
    elif (sign_a < 0 and sign_b < 0 and
          (units_sum_no_carry <= -_INT64_MAX or units_sum <= -_INT64_MAX)):
        if not allow_overflow:
            raise OverflowError(u'Money addition negative overflow')
        else:
            # Clamp to the most negative representable Money value.
            return sc_messages.Money(units=_INT64_MIN,
                                     nanos=-MAX_NANOS,
                                     currencyCode=a.currencyCode)
    else:
        return sc_messages.Money(units=units_sum,
                                 nanos=nanos_sum,
                                 currencyCode=a.currencyCode)
constant[Adds two instances of `Money`.
Args:
a (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): one money
value
b (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): another
money value
allow_overflow: determines if the addition is allowed to overflow
Return:
`Money`: an instance of Money
Raises:
ValueError: if the inputs do not have the same currency code
OverflowError: if the sum overflows and allow_overflow is not `True`
]
for taget[name[m]] in starred[tuple[[<ast.Name object at 0x7da18bc73310>, <ast.Name object at 0x7da18bc73b20>]]] begin[:]
if <ast.UnaryOp object at 0x7da18bc71810> begin[:]
<ast.Raise object at 0x7da18bc708b0>
if compare[name[a].currencyCode not_equal[!=] name[b].currencyCode] begin[:]
<ast.Raise object at 0x7da18bc73400>
<ast.Tuple object at 0x7da18bc727d0> assign[=] call[name[_sum_nanos], parameter[name[a], name[b]]]
variable[units_sum_no_carry] assign[=] binary_operation[name[a].units + name[b].units]
variable[units_sum] assign[=] binary_operation[name[units_sum_no_carry] + name[nano_carry]]
if <ast.BoolOp object at 0x7da1b0472590> begin[:]
<ast.AugAssign object at 0x7da1b04703a0>
<ast.AugAssign object at 0x7da1b0470340>
variable[sign_a] assign[=] call[name[_sign_of], parameter[name[a]]]
variable[sign_b] assign[=] call[name[_sign_of], parameter[name[b]]]
if <ast.BoolOp object at 0x7da1b0473ee0> begin[:]
if <ast.UnaryOp object at 0x7da1b0471780> begin[:]
<ast.Raise object at 0x7da1b04703d0> | keyword[def] identifier[add] ( identifier[a] , identifier[b] , identifier[allow_overflow] = keyword[False] ):
literal[string]
keyword[for] identifier[m] keyword[in] ( identifier[a] , identifier[b] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[m] , identifier[sc_messages] . identifier[Money] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[sc_messages] . identifier[Money] ,))
keyword[if] identifier[a] . identifier[currencyCode] != identifier[b] . identifier[currencyCode] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[nano_carry] , identifier[nanos_sum] = identifier[_sum_nanos] ( identifier[a] , identifier[b] )
identifier[units_sum_no_carry] = identifier[a] . identifier[units] + identifier[b] . identifier[units]
identifier[units_sum] = identifier[units_sum_no_carry] + identifier[nano_carry]
keyword[if] identifier[units_sum] > literal[int] keyword[and] identifier[nanos_sum] < literal[int] :
identifier[units_sum] -= literal[int]
identifier[nanos_sum] += identifier[_BILLION]
keyword[elif] identifier[units_sum] < literal[int] keyword[and] identifier[nanos_sum] > literal[int] :
identifier[units_sum] += literal[int]
identifier[nanos_sum] -= identifier[_BILLION]
identifier[sign_a] = identifier[_sign_of] ( identifier[a] )
identifier[sign_b] = identifier[_sign_of] ( identifier[b] )
keyword[if] identifier[sign_a] > literal[int] keyword[and] identifier[sign_b] > literal[int] keyword[and] identifier[units_sum] >= identifier[_INT64_MAX] :
keyword[if] keyword[not] identifier[allow_overflow] :
keyword[raise] identifier[OverflowError] ( literal[string] )
keyword[else] :
keyword[return] identifier[sc_messages] . identifier[Money] ( identifier[units] = identifier[_INT64_MAX] ,
identifier[nanos] = identifier[MAX_NANOS] ,
identifier[currencyCode] = identifier[a] . identifier[currencyCode] )
keyword[elif] ( identifier[sign_a] < literal[int] keyword[and] identifier[sign_b] < literal[int] keyword[and]
( identifier[units_sum_no_carry] <=- identifier[_INT64_MAX] keyword[or] identifier[units_sum] <=- identifier[_INT64_MAX] )):
keyword[if] keyword[not] identifier[allow_overflow] :
keyword[raise] identifier[OverflowError] ( literal[string] )
keyword[else] :
keyword[return] identifier[sc_messages] . identifier[Money] ( identifier[units] = identifier[_INT64_MIN] ,
identifier[nanos] =- identifier[MAX_NANOS] ,
identifier[currencyCode] = identifier[a] . identifier[currencyCode] )
keyword[else] :
keyword[return] identifier[sc_messages] . identifier[Money] ( identifier[units] = identifier[units_sum] ,
identifier[nanos] = identifier[nanos_sum] ,
identifier[currencyCode] = identifier[a] . identifier[currencyCode] ) | def add(a, b, allow_overflow=False):
"""Adds two instances of `Money`.
Args:
a (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): one money
value
b (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): another
money value
allow_overflow: determines if the addition is allowed to overflow
Return:
`Money`: an instance of Money
Raises:
ValueError: if the inputs do not have the same currency code
OverflowError: if the sum overflows and allow_overflow is not `True`
"""
for m in (a, b):
if not isinstance(m, sc_messages.Money):
raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']]
if a.currencyCode != b.currencyCode:
raise ValueError(u'Money values need the same currency to be summed') # depends on [control=['if'], data=[]]
(nano_carry, nanos_sum) = _sum_nanos(a, b)
units_sum_no_carry = a.units + b.units
units_sum = units_sum_no_carry + nano_carry
# Adjust when units_sum and nanos_sum have different signs
if units_sum > 0 and nanos_sum < 0:
units_sum -= 1
nanos_sum += _BILLION # depends on [control=['if'], data=[]]
elif units_sum < 0 and nanos_sum > 0:
units_sum += 1
nanos_sum -= _BILLION # depends on [control=['if'], data=[]]
# Return the result, detecting overflow if it occurs
sign_a = _sign_of(a)
sign_b = _sign_of(b)
if sign_a > 0 and sign_b > 0 and (units_sum >= _INT64_MAX):
if not allow_overflow:
raise OverflowError(u'Money addition positive overflow') # depends on [control=['if'], data=[]]
else:
return sc_messages.Money(units=_INT64_MAX, nanos=MAX_NANOS, currencyCode=a.currencyCode) # depends on [control=['if'], data=[]]
elif sign_a < 0 and sign_b < 0 and (units_sum_no_carry <= -_INT64_MAX or units_sum <= -_INT64_MAX):
if not allow_overflow:
raise OverflowError(u'Money addition negative overflow') # depends on [control=['if'], data=[]]
else:
return sc_messages.Money(units=_INT64_MIN, nanos=-MAX_NANOS, currencyCode=a.currencyCode) # depends on [control=['if'], data=[]]
else:
return sc_messages.Money(units=units_sum, nanos=nanos_sum, currencyCode=a.currencyCode) |
def export(self, view, include=None):
    """
    Export a view. Returns an Export object.
    :param include: list of objects to sideload. `Side-loading API Docs
    :param view: View or view id
    :return:
    """
    endpoint_path = self.endpoint.export(id=view, include=include)
    return self._get(self._build_url(endpoint_path))
constant[
Export a view. Returns an Export object.
:param include: list of objects to sideload. `Side-loading API Docs
:param view: View or view id
:return:
]
return[call[name[self]._get, parameter[call[name[self]._build_url, parameter[call[name[self].endpoint.export, parameter[]]]]]]] | keyword[def] identifier[export] ( identifier[self] , identifier[view] , identifier[include] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_get] ( identifier[self] . identifier[_build_url] ( identifier[self] . identifier[endpoint] . identifier[export] ( identifier[id] = identifier[view] , identifier[include] = identifier[include] ))) | def export(self, view, include=None):
"""
Export a view. Returns an Export object.
:param include: list of objects to sideload. `Side-loading API Docs
:param view: View or view id
:return:
"""
return self._get(self._build_url(self.endpoint.export(id=view, include=include))) |
def pairwise(iterable):
    """Pair each element with its successor.

    For input (x0, x1, x2, ...) the generator yields (x0, x1), (x1, x2),
    and so on.  An empty or single-element iterable yields nothing.

    Arguments
    ---------
    iterable : iterable

    Returns
    -------
    The generator produces a tuple containing a pairing of each element
    with its neighbor.
    """
    iterator = iter(iterable)
    # Guard the first next(): a StopIteration escaping from inside a
    # generator raises RuntimeError under PEP 479 (Python 3.7+), so an
    # empty iterable would previously crash instead of yielding nothing.
    try:
        left = next(iterator)
    except StopIteration:
        return
    for right in iterator:
        yield left, right
        left = right
constant[Pair each element with its neighbors.
Arguments
---------
iterable : iterable
Returns
-------
The generator produces a tuple containing a pairing of each element with
its neighbor.
]
variable[iterable] assign[=] call[name[iter], parameter[name[iterable]]]
variable[left] assign[=] call[name[next], parameter[name[iterable]]]
for taget[name[right]] in starred[name[iterable]] begin[:]
<ast.Yield object at 0x7da1b172a830>
variable[left] assign[=] name[right] | keyword[def] identifier[pairwise] ( identifier[iterable] ):
literal[string]
identifier[iterable] = identifier[iter] ( identifier[iterable] )
identifier[left] = identifier[next] ( identifier[iterable] )
keyword[for] identifier[right] keyword[in] identifier[iterable] :
keyword[yield] identifier[left] , identifier[right]
identifier[left] = identifier[right] | def pairwise(iterable):
"""Pair each element with its neighbors.
Arguments
---------
iterable : iterable
Returns
-------
The generator produces a tuple containing a pairing of each element with
its neighbor.
"""
iterable = iter(iterable)
left = next(iterable)
for right in iterable:
yield (left, right)
left = right # depends on [control=['for'], data=['right']] |
def get_version_naive(cls, name, ignore=''):
""" Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
match = cls._get_regex_search(name, cls.REGEX_VERSION.format(SEP=cls.REGEX_SEPARATORS), ignore=ignore)
if match is not None:
if len(match) > 1:
for m in match:
m.update({'version': int(m['match'].upper().replace('V', ''))})
compound_version = '.'.join([str(m['version']) for m in match])
compound_version = float(compound_version) if compound_version.count('.') == 1 else compound_version
return {'compound_matches': match,
'compound_version': compound_version,
'pattern': match[0]['pattern'],
'input': match[0]['input']}
elif len(match) == 1:
match = match[0]
match.update({'version': int(match['match'].upper().replace('V', ''))})
return match
return None | def function[get_version_naive, parameter[cls, name, ignore]]:
constant[ Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
]
variable[match] assign[=] call[name[cls]._get_regex_search, parameter[name[name], call[name[cls].REGEX_VERSION.format, parameter[]]]]
if compare[name[match] is_not constant[None]] begin[:]
if compare[call[name[len], parameter[name[match]]] greater[>] constant[1]] begin[:]
for taget[name[m]] in starred[name[match]] begin[:]
call[name[m].update, parameter[dictionary[[<ast.Constant object at 0x7da207f9bc10>], [<ast.Call object at 0x7da207f993f0>]]]]
variable[compound_version] assign[=] call[constant[.].join, parameter[<ast.ListComp object at 0x7da18ede5630>]]
variable[compound_version] assign[=] <ast.IfExp object at 0x7da18ede6890>
return[dictionary[[<ast.Constant object at 0x7da207f02350>, <ast.Constant object at 0x7da207f01ff0>, <ast.Constant object at 0x7da207f00220>, <ast.Constant object at 0x7da207f034f0>], [<ast.Name object at 0x7da207f00880>, <ast.Name object at 0x7da207f02080>, <ast.Subscript object at 0x7da207f039d0>, <ast.Subscript object at 0x7da207f03190>]]]
return[constant[None]] | keyword[def] identifier[get_version_naive] ( identifier[cls] , identifier[name] , identifier[ignore] = literal[string] ):
literal[string]
identifier[match] = identifier[cls] . identifier[_get_regex_search] ( identifier[name] , identifier[cls] . identifier[REGEX_VERSION] . identifier[format] ( identifier[SEP] = identifier[cls] . identifier[REGEX_SEPARATORS] ), identifier[ignore] = identifier[ignore] )
keyword[if] identifier[match] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[match] )> literal[int] :
keyword[for] identifier[m] keyword[in] identifier[match] :
identifier[m] . identifier[update] ({ literal[string] : identifier[int] ( identifier[m] [ literal[string] ]. identifier[upper] (). identifier[replace] ( literal[string] , literal[string] ))})
identifier[compound_version] = literal[string] . identifier[join] ([ identifier[str] ( identifier[m] [ literal[string] ]) keyword[for] identifier[m] keyword[in] identifier[match] ])
identifier[compound_version] = identifier[float] ( identifier[compound_version] ) keyword[if] identifier[compound_version] . identifier[count] ( literal[string] )== literal[int] keyword[else] identifier[compound_version]
keyword[return] { literal[string] : identifier[match] ,
literal[string] : identifier[compound_version] ,
literal[string] : identifier[match] [ literal[int] ][ literal[string] ],
literal[string] : identifier[match] [ literal[int] ][ literal[string] ]}
keyword[elif] identifier[len] ( identifier[match] )== literal[int] :
identifier[match] = identifier[match] [ literal[int] ]
identifier[match] . identifier[update] ({ literal[string] : identifier[int] ( identifier[match] [ literal[string] ]. identifier[upper] (). identifier[replace] ( literal[string] , literal[string] ))})
keyword[return] identifier[match]
keyword[return] keyword[None] | def get_version_naive(cls, name, ignore=''):
""" Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
match = cls._get_regex_search(name, cls.REGEX_VERSION.format(SEP=cls.REGEX_SEPARATORS), ignore=ignore)
if match is not None:
if len(match) > 1:
for m in match:
m.update({'version': int(m['match'].upper().replace('V', ''))}) # depends on [control=['for'], data=['m']]
compound_version = '.'.join([str(m['version']) for m in match])
compound_version = float(compound_version) if compound_version.count('.') == 1 else compound_version
return {'compound_matches': match, 'compound_version': compound_version, 'pattern': match[0]['pattern'], 'input': match[0]['input']} # depends on [control=['if'], data=[]]
elif len(match) == 1:
match = match[0]
match.update({'version': int(match['match'].upper().replace('V', ''))})
return match # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['match']]
return None |
def get_queryset(self):
'''
Parameters are already validated in the QuerySetPermission
'''
model_type = self.request.GET.get("type")
pk = self.request.GET.get("id")
content_type_model = ContentType.objects.get(model=model_type.lower())
Model = content_type_model.model_class()
model_obj = Model.objects.filter(id=pk).first()
return Comment.objects.filter_by_object(model_obj) | def function[get_queryset, parameter[self]]:
constant[
Parameters are already validated in the QuerySetPermission
]
variable[model_type] assign[=] call[name[self].request.GET.get, parameter[constant[type]]]
variable[pk] assign[=] call[name[self].request.GET.get, parameter[constant[id]]]
variable[content_type_model] assign[=] call[name[ContentType].objects.get, parameter[]]
variable[Model] assign[=] call[name[content_type_model].model_class, parameter[]]
variable[model_obj] assign[=] call[call[name[Model].objects.filter, parameter[]].first, parameter[]]
return[call[name[Comment].objects.filter_by_object, parameter[name[model_obj]]]] | keyword[def] identifier[get_queryset] ( identifier[self] ):
literal[string]
identifier[model_type] = identifier[self] . identifier[request] . identifier[GET] . identifier[get] ( literal[string] )
identifier[pk] = identifier[self] . identifier[request] . identifier[GET] . identifier[get] ( literal[string] )
identifier[content_type_model] = identifier[ContentType] . identifier[objects] . identifier[get] ( identifier[model] = identifier[model_type] . identifier[lower] ())
identifier[Model] = identifier[content_type_model] . identifier[model_class] ()
identifier[model_obj] = identifier[Model] . identifier[objects] . identifier[filter] ( identifier[id] = identifier[pk] ). identifier[first] ()
keyword[return] identifier[Comment] . identifier[objects] . identifier[filter_by_object] ( identifier[model_obj] ) | def get_queryset(self):
"""
Parameters are already validated in the QuerySetPermission
"""
model_type = self.request.GET.get('type')
pk = self.request.GET.get('id')
content_type_model = ContentType.objects.get(model=model_type.lower())
Model = content_type_model.model_class()
model_obj = Model.objects.filter(id=pk).first()
return Comment.objects.filter_by_object(model_obj) |
def _show_loading_page(self):
"""Show animation while the kernel is loading."""
self.shellwidget.hide()
self.infowidget.show()
self.info_page = self.loading_page
self.set_info_page() | def function[_show_loading_page, parameter[self]]:
constant[Show animation while the kernel is loading.]
call[name[self].shellwidget.hide, parameter[]]
call[name[self].infowidget.show, parameter[]]
name[self].info_page assign[=] name[self].loading_page
call[name[self].set_info_page, parameter[]] | keyword[def] identifier[_show_loading_page] ( identifier[self] ):
literal[string]
identifier[self] . identifier[shellwidget] . identifier[hide] ()
identifier[self] . identifier[infowidget] . identifier[show] ()
identifier[self] . identifier[info_page] = identifier[self] . identifier[loading_page]
identifier[self] . identifier[set_info_page] () | def _show_loading_page(self):
"""Show animation while the kernel is loading."""
self.shellwidget.hide()
self.infowidget.show()
self.info_page = self.loading_page
self.set_info_page() |
def _pdf(self, phi):
"""
Evaluate the _unnormalized_ flow PDF.
"""
pdf = np.inner(self._vn, np.cos(np.outer(phi, self._n)))
pdf *= 2.
pdf += 1.
return pdf | def function[_pdf, parameter[self, phi]]:
constant[
Evaluate the _unnormalized_ flow PDF.
]
variable[pdf] assign[=] call[name[np].inner, parameter[name[self]._vn, call[name[np].cos, parameter[call[name[np].outer, parameter[name[phi], name[self]._n]]]]]]
<ast.AugAssign object at 0x7da1b10b07f0>
<ast.AugAssign object at 0x7da1b10b1ab0>
return[name[pdf]] | keyword[def] identifier[_pdf] ( identifier[self] , identifier[phi] ):
literal[string]
identifier[pdf] = identifier[np] . identifier[inner] ( identifier[self] . identifier[_vn] , identifier[np] . identifier[cos] ( identifier[np] . identifier[outer] ( identifier[phi] , identifier[self] . identifier[_n] )))
identifier[pdf] *= literal[int]
identifier[pdf] += literal[int]
keyword[return] identifier[pdf] | def _pdf(self, phi):
"""
Evaluate the _unnormalized_ flow PDF.
"""
pdf = np.inner(self._vn, np.cos(np.outer(phi, self._n)))
pdf *= 2.0
pdf += 1.0
return pdf |
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores | def function[process_model_scores, parameter[self, model_names, root_cache, include_features]]:
constant[
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
]
variable[model_scores] assign[=] dictionary[[], []]
for taget[name[model_name]] in starred[name[model_names]] begin[:]
call[name[model_scores]][name[model_name]] assign[=] dictionary[[], []]
call[call[name[model_scores]][name[model_name]]][constant[score]] assign[=] call[name[self]._process_score, parameter[name[model_name]]]
if name[include_features] begin[:]
variable[base_feature_map] assign[=] call[name[self]._solve_base_feature_map, parameter[name[model_name]]]
call[call[name[model_scores]][name[model_name]]][constant[features]] assign[=] name[base_feature_map]
return[name[model_scores]] | keyword[def] identifier[process_model_scores] ( identifier[self] , identifier[model_names] , identifier[root_cache] ,
identifier[include_features] = keyword[False] ):
literal[string]
identifier[model_scores] ={}
keyword[for] identifier[model_name] keyword[in] identifier[model_names] :
identifier[model_scores] [ identifier[model_name] ]={}
identifier[model_scores] [ identifier[model_name] ][ literal[string] ]= identifier[self] . identifier[_process_score] ( identifier[model_name] , identifier[dependency_cache] = identifier[root_cache] )
keyword[if] identifier[include_features] :
identifier[base_feature_map] = identifier[self] . identifier[_solve_base_feature_map] (
identifier[model_name] , identifier[dependency_cache] = identifier[root_cache] )
identifier[model_scores] [ identifier[model_name] ][ literal[string] ]= identifier[base_feature_map]
keyword[return] identifier[model_scores] | def process_model_scores(self, model_names, root_cache, include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['model_name']]
return model_scores |
def _calc_texture_gradient(img):
"""
calculate texture gradient for entire image
The original SelectiveSearch algorithm proposed Gaussian derivative
for 8 orientations, but we use LBP instead.
output will be [height(*)][width(*)]
"""
ret = numpy.zeros((img.shape[0], img.shape[1], img.shape[2]))
for colour_channel in (0, 1, 2):
ret[:, :, colour_channel] = skimage.feature.local_binary_pattern(
img[:, :, colour_channel], 8, 1.0)
return ret | def function[_calc_texture_gradient, parameter[img]]:
constant[
calculate texture gradient for entire image
The original SelectiveSearch algorithm proposed Gaussian derivative
for 8 orientations, but we use LBP instead.
output will be [height(*)][width(*)]
]
variable[ret] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Subscript object at 0x7da1b1d75d50>, <ast.Subscript object at 0x7da1b1d74700>, <ast.Subscript object at 0x7da1b1d74fa0>]]]]
for taget[name[colour_channel]] in starred[tuple[[<ast.Constant object at 0x7da1b1d75180>, <ast.Constant object at 0x7da1b1d77460>, <ast.Constant object at 0x7da1b1d774f0>]]] begin[:]
call[name[ret]][tuple[[<ast.Slice object at 0x7da1b1d777c0>, <ast.Slice object at 0x7da1b1d779a0>, <ast.Name object at 0x7da1b1d77970>]]] assign[=] call[name[skimage].feature.local_binary_pattern, parameter[call[name[img]][tuple[[<ast.Slice object at 0x7da1b1d767d0>, <ast.Slice object at 0x7da1b1d77a00>, <ast.Name object at 0x7da1b1d77880>]]], constant[8], constant[1.0]]]
return[name[ret]] | keyword[def] identifier[_calc_texture_gradient] ( identifier[img] ):
literal[string]
identifier[ret] = identifier[numpy] . identifier[zeros] (( identifier[img] . identifier[shape] [ literal[int] ], identifier[img] . identifier[shape] [ literal[int] ], identifier[img] . identifier[shape] [ literal[int] ]))
keyword[for] identifier[colour_channel] keyword[in] ( literal[int] , literal[int] , literal[int] ):
identifier[ret] [:,:, identifier[colour_channel] ]= identifier[skimage] . identifier[feature] . identifier[local_binary_pattern] (
identifier[img] [:,:, identifier[colour_channel] ], literal[int] , literal[int] )
keyword[return] identifier[ret] | def _calc_texture_gradient(img):
"""
calculate texture gradient for entire image
The original SelectiveSearch algorithm proposed Gaussian derivative
for 8 orientations, but we use LBP instead.
output will be [height(*)][width(*)]
"""
ret = numpy.zeros((img.shape[0], img.shape[1], img.shape[2]))
for colour_channel in (0, 1, 2):
ret[:, :, colour_channel] = skimage.feature.local_binary_pattern(img[:, :, colour_channel], 8, 1.0) # depends on [control=['for'], data=['colour_channel']]
return ret |
def consistent(self,lab):
"""
Check whether the labeling is consistent with all constraints
"""
for const in self.constraints:
if not const.consistent(lab):
return False
return True | def function[consistent, parameter[self, lab]]:
constant[
Check whether the labeling is consistent with all constraints
]
for taget[name[const]] in starred[name[self].constraints] begin[:]
if <ast.UnaryOp object at 0x7da20c6c4f40> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[consistent] ( identifier[self] , identifier[lab] ):
literal[string]
keyword[for] identifier[const] keyword[in] identifier[self] . identifier[constraints] :
keyword[if] keyword[not] identifier[const] . identifier[consistent] ( identifier[lab] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def consistent(self, lab):
"""
Check whether the labeling is consistent with all constraints
"""
for const in self.constraints:
if not const.consistent(lab):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['const']]
return True |
def fetch(self, endpoint, data = None):
"""
for getting data after logged in
"""
payload = {
"lastServerChangeId": "-1",
"csrf": self.__csrf,
"apiClient": "WEB"
}
if data is not None:
payload.update(data)
return self.post(endpoint, payload) | def function[fetch, parameter[self, endpoint, data]]:
constant[
for getting data after logged in
]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da20c76d960>, <ast.Constant object at 0x7da20c76d2a0>, <ast.Constant object at 0x7da20c76e290>], [<ast.Constant object at 0x7da20c76fe50>, <ast.Attribute object at 0x7da20c76f220>, <ast.Constant object at 0x7da20c76c5b0>]]
if compare[name[data] is_not constant[None]] begin[:]
call[name[payload].update, parameter[name[data]]]
return[call[name[self].post, parameter[name[endpoint], name[payload]]]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[endpoint] , identifier[data] = keyword[None] ):
literal[string]
identifier[payload] ={
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[__csrf] ,
literal[string] : literal[string]
}
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[payload] . identifier[update] ( identifier[data] )
keyword[return] identifier[self] . identifier[post] ( identifier[endpoint] , identifier[payload] ) | def fetch(self, endpoint, data=None):
"""
for getting data after logged in
"""
payload = {'lastServerChangeId': '-1', 'csrf': self.__csrf, 'apiClient': 'WEB'}
if data is not None:
payload.update(data) # depends on [control=['if'], data=['data']]
return self.post(endpoint, payload) |
def handle_authorized(self, event):
"""Send the initial presence after log-in."""
request_software_version(self.client, self.target_jid,
self.success, self.failure) | def function[handle_authorized, parameter[self, event]]:
constant[Send the initial presence after log-in.]
call[name[request_software_version], parameter[name[self].client, name[self].target_jid, name[self].success, name[self].failure]] | keyword[def] identifier[handle_authorized] ( identifier[self] , identifier[event] ):
literal[string]
identifier[request_software_version] ( identifier[self] . identifier[client] , identifier[self] . identifier[target_jid] ,
identifier[self] . identifier[success] , identifier[self] . identifier[failure] ) | def handle_authorized(self, event):
"""Send the initial presence after log-in."""
request_software_version(self.client, self.target_jid, self.success, self.failure) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.