code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def replace_placeholders(self, value):
"""Replaces placeholders that can be used e.g. in filepaths.
Supported placeholders:
* {project_runtime_dir}
* {project_name}
* {runtime_dir}
:param str|unicode|list[str|unicode]|None value:
:rtype: None|str|unicode|list[str|unicode]
"""
if not value:
return value
is_list = isinstance(value, list)
values = []
for value in listify(value):
runtime_dir = self.get_runtime_dir()
project_name = self.project_name
value = value.replace('{runtime_dir}', runtime_dir)
value = value.replace('{project_name}', project_name)
value = value.replace('{project_runtime_dir}', os.path.join(runtime_dir, project_name))
values.append(value)
value = values if is_list else values.pop()
return value | def function[replace_placeholders, parameter[self, value]]:
constant[Replaces placeholders that can be used e.g. in filepaths.
Supported placeholders:
* {project_runtime_dir}
* {project_name}
* {runtime_dir}
:param str|unicode|list[str|unicode]|None value:
:rtype: None|str|unicode|list[str|unicode]
]
if <ast.UnaryOp object at 0x7da1b10c6d70> begin[:]
return[name[value]]
variable[is_list] assign[=] call[name[isinstance], parameter[name[value], name[list]]]
variable[values] assign[=] list[[]]
for taget[name[value]] in starred[call[name[listify], parameter[name[value]]]] begin[:]
variable[runtime_dir] assign[=] call[name[self].get_runtime_dir, parameter[]]
variable[project_name] assign[=] name[self].project_name
variable[value] assign[=] call[name[value].replace, parameter[constant[{runtime_dir}], name[runtime_dir]]]
variable[value] assign[=] call[name[value].replace, parameter[constant[{project_name}], name[project_name]]]
variable[value] assign[=] call[name[value].replace, parameter[constant[{project_runtime_dir}], call[name[os].path.join, parameter[name[runtime_dir], name[project_name]]]]]
call[name[values].append, parameter[name[value]]]
variable[value] assign[=] <ast.IfExp object at 0x7da1b10a66e0>
return[name[value]] | keyword[def] identifier[replace_placeholders] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return] identifier[value]
identifier[is_list] = identifier[isinstance] ( identifier[value] , identifier[list] )
identifier[values] =[]
keyword[for] identifier[value] keyword[in] identifier[listify] ( identifier[value] ):
identifier[runtime_dir] = identifier[self] . identifier[get_runtime_dir] ()
identifier[project_name] = identifier[self] . identifier[project_name]
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , identifier[runtime_dir] )
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , identifier[project_name] )
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , identifier[os] . identifier[path] . identifier[join] ( identifier[runtime_dir] , identifier[project_name] ))
identifier[values] . identifier[append] ( identifier[value] )
identifier[value] = identifier[values] keyword[if] identifier[is_list] keyword[else] identifier[values] . identifier[pop] ()
keyword[return] identifier[value] | def replace_placeholders(self, value):
"""Replaces placeholders that can be used e.g. in filepaths.
Supported placeholders:
* {project_runtime_dir}
* {project_name}
* {runtime_dir}
:param str|unicode|list[str|unicode]|None value:
:rtype: None|str|unicode|list[str|unicode]
"""
if not value:
return value # depends on [control=['if'], data=[]]
is_list = isinstance(value, list)
values = []
for value in listify(value):
runtime_dir = self.get_runtime_dir()
project_name = self.project_name
value = value.replace('{runtime_dir}', runtime_dir)
value = value.replace('{project_name}', project_name)
value = value.replace('{project_runtime_dir}', os.path.join(runtime_dir, project_name))
values.append(value) # depends on [control=['for'], data=['value']]
value = values if is_list else values.pop()
return value |
def service_changed(self, event):
# type: (ServiceEvent) -> None
"""
Handles an event about the iPOPO service
"""
kind = event.get_kind()
if kind == ServiceEvent.REGISTERED:
# iPOPO service registered: register to factory events
with use_ipopo(self.__context) as ipopo:
ipopo.add_listener(self) | def function[service_changed, parameter[self, event]]:
constant[
Handles an event about the iPOPO service
]
variable[kind] assign[=] call[name[event].get_kind, parameter[]]
if compare[name[kind] equal[==] name[ServiceEvent].REGISTERED] begin[:]
with call[name[use_ipopo], parameter[name[self].__context]] begin[:]
call[name[ipopo].add_listener, parameter[name[self]]] | keyword[def] identifier[service_changed] ( identifier[self] , identifier[event] ):
literal[string]
identifier[kind] = identifier[event] . identifier[get_kind] ()
keyword[if] identifier[kind] == identifier[ServiceEvent] . identifier[REGISTERED] :
keyword[with] identifier[use_ipopo] ( identifier[self] . identifier[__context] ) keyword[as] identifier[ipopo] :
identifier[ipopo] . identifier[add_listener] ( identifier[self] ) | def service_changed(self, event):
# type: (ServiceEvent) -> None
'\n Handles an event about the iPOPO service\n '
kind = event.get_kind()
if kind == ServiceEvent.REGISTERED:
# iPOPO service registered: register to factory events
with use_ipopo(self.__context) as ipopo:
ipopo.add_listener(self) # depends on [control=['with'], data=['ipopo']] # depends on [control=['if'], data=[]] |
def create_communication_channel(self, user_id, communication_channel_type, communication_channel_address, communication_channel_token=None, skip_confirmation=None):
"""
Create a communication channel.
Creates a new communication channel for the specified user.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - communication_channel[address]
"""An email address or SMS number. Not required for "push" type channels."""
data["communication_channel[address]"] = communication_channel_address
# REQUIRED - communication_channel[type]
"""The type of communication channel.
In order to enable push notification support, the server must be
properly configured (via sns.yml) to communicate with Amazon
Simple Notification Services, and the developer key used to create
the access token from this request must have an SNS ARN configured on
it."""
self._validate_enum(communication_channel_type, ["email", "sms", "push"])
data["communication_channel[type]"] = communication_channel_type
# OPTIONAL - communication_channel[token]
"""A registration id, device token, or equivalent token given to an app when
registering with a push notification provider. Only valid for "push" type channels."""
if communication_channel_token is not None:
data["communication_channel[token]"] = communication_channel_token
# OPTIONAL - skip_confirmation
"""Only valid for site admins and account admins making requests; If true, the channel is
automatically validated and no confirmation email or SMS is sent.
Otherwise, the user must respond to a confirmation message to confirm the
channel."""
if skip_confirmation is not None:
data["skip_confirmation"] = skip_confirmation
self.logger.debug("POST /api/v1/users/{user_id}/communication_channels with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/users/{user_id}/communication_channels".format(**path), data=data, params=params, single_item=True) | def function[create_communication_channel, parameter[self, user_id, communication_channel_type, communication_channel_address, communication_channel_token, skip_confirmation]]:
constant[
Create a communication channel.
Creates a new communication channel for the specified user.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[user_id]] assign[=] name[user_id]
constant[An email address or SMS number. Not required for "push" type channels.]
call[name[data]][constant[communication_channel[address]]] assign[=] name[communication_channel_address]
constant[The type of communication channel.
In order to enable push notification support, the server must be
properly configured (via sns.yml) to communicate with Amazon
Simple Notification Services, and the developer key used to create
the access token from this request must have an SNS ARN configured on
it.]
call[name[self]._validate_enum, parameter[name[communication_channel_type], list[[<ast.Constant object at 0x7da1b0bd0c70>, <ast.Constant object at 0x7da1b0bd0ca0>, <ast.Constant object at 0x7da1b0bd0cd0>]]]]
call[name[data]][constant[communication_channel[type]]] assign[=] name[communication_channel_type]
constant[A registration id, device token, or equivalent token given to an app when
registering with a push notification provider. Only valid for "push" type channels.]
if compare[name[communication_channel_token] is_not constant[None]] begin[:]
call[name[data]][constant[communication_channel[token]]] assign[=] name[communication_channel_token]
constant[Only valid for site admins and account admins making requests; If true, the channel is
automatically validated and no confirmation email or SMS is sent.
Otherwise, the user must respond to a confirmation message to confirm the
channel.]
if compare[name[skip_confirmation] is_not constant[None]] begin[:]
call[name[data]][constant[skip_confirmation]] assign[=] name[skip_confirmation]
call[name[self].logger.debug, parameter[call[constant[POST /api/v1/users/{user_id}/communication_channels with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[POST], call[constant[/api/v1/users/{user_id}/communication_channels].format, parameter[]]]]] | keyword[def] identifier[create_communication_channel] ( identifier[self] , identifier[user_id] , identifier[communication_channel_type] , identifier[communication_channel_address] , identifier[communication_channel_token] = keyword[None] , identifier[skip_confirmation] = keyword[None] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[user_id]
literal[string]
identifier[data] [ literal[string] ]= identifier[communication_channel_address]
literal[string]
identifier[self] . identifier[_validate_enum] ( identifier[communication_channel_type] ,[ literal[string] , literal[string] , literal[string] ])
identifier[data] [ literal[string] ]= identifier[communication_channel_type]
literal[string]
keyword[if] identifier[communication_channel_token] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[communication_channel_token]
literal[string]
keyword[if] identifier[skip_confirmation] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[skip_confirmation]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[single_item] = keyword[True] ) | def create_communication_channel(self, user_id, communication_channel_type, communication_channel_address, communication_channel_token=None, skip_confirmation=None):
"""
Create a communication channel.
Creates a new communication channel for the specified user.
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - user_id
'ID'
path['user_id'] = user_id # REQUIRED - communication_channel[address]
'An email address or SMS number. Not required for "push" type channels.'
data['communication_channel[address]'] = communication_channel_address # REQUIRED - communication_channel[type]
'The type of communication channel.\n \n In order to enable push notification support, the server must be\n properly configured (via sns.yml) to communicate with Amazon\n Simple Notification Services, and the developer key used to create\n the access token from this request must have an SNS ARN configured on\n it.'
self._validate_enum(communication_channel_type, ['email', 'sms', 'push'])
data['communication_channel[type]'] = communication_channel_type # OPTIONAL - communication_channel[token]
'A registration id, device token, or equivalent token given to an app when\n registering with a push notification provider. Only valid for "push" type channels.'
if communication_channel_token is not None:
data['communication_channel[token]'] = communication_channel_token # depends on [control=['if'], data=['communication_channel_token']] # OPTIONAL - skip_confirmation
'Only valid for site admins and account admins making requests; If true, the channel is\n automatically validated and no confirmation email or SMS is sent.\n Otherwise, the user must respond to a confirmation message to confirm the\n channel.'
if skip_confirmation is not None:
data['skip_confirmation'] = skip_confirmation # depends on [control=['if'], data=['skip_confirmation']]
self.logger.debug('POST /api/v1/users/{user_id}/communication_channels with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('POST', '/api/v1/users/{user_id}/communication_channels'.format(**path), data=data, params=params, single_item=True) |
def _send_packet(self, data):
" Send to server. "
data = json.dumps(data).encode('utf-8')
# Be sure that our socket is blocking, otherwise, the send() call could
# raise `BlockingIOError` if the buffer is full.
self.socket.setblocking(1)
self.socket.send(data + b'\0') | def function[_send_packet, parameter[self, data]]:
constant[ Send to server. ]
variable[data] assign[=] call[call[name[json].dumps, parameter[name[data]]].encode, parameter[constant[utf-8]]]
call[name[self].socket.setblocking, parameter[constant[1]]]
call[name[self].socket.send, parameter[binary_operation[name[data] + constant[b'\x00']]]] | keyword[def] identifier[_send_packet] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] ). identifier[encode] ( literal[string] )
identifier[self] . identifier[socket] . identifier[setblocking] ( literal[int] )
identifier[self] . identifier[socket] . identifier[send] ( identifier[data] + literal[string] ) | def _send_packet(self, data):
""" Send to server. """
data = json.dumps(data).encode('utf-8')
# Be sure that our socket is blocking, otherwise, the send() call could
# raise `BlockingIOError` if the buffer is full.
self.socket.setblocking(1)
self.socket.send(data + b'\x00') |
def md_to_text(content):
""" Converts markdown content to text """
text = None
html = markdown.markdown(content)
if html:
text = html_to_text(content)
return text | def function[md_to_text, parameter[content]]:
constant[ Converts markdown content to text ]
variable[text] assign[=] constant[None]
variable[html] assign[=] call[name[markdown].markdown, parameter[name[content]]]
if name[html] begin[:]
variable[text] assign[=] call[name[html_to_text], parameter[name[content]]]
return[name[text]] | keyword[def] identifier[md_to_text] ( identifier[content] ):
literal[string]
identifier[text] = keyword[None]
identifier[html] = identifier[markdown] . identifier[markdown] ( identifier[content] )
keyword[if] identifier[html] :
identifier[text] = identifier[html_to_text] ( identifier[content] )
keyword[return] identifier[text] | def md_to_text(content):
""" Converts markdown content to text """
text = None
html = markdown.markdown(content)
if html:
text = html_to_text(content) # depends on [control=['if'], data=[]]
return text |
def getnii_descr(fim):
'''
Extracts the custom description header field to dictionary
'''
nim = nib.load(fim)
hdr = nim.header
rcnlst = hdr['descrip'].item().split(';')
rcndic = {}
if rcnlst[0]=='':
# print 'w> no description in the NIfTI header'
return rcndic
for ci in range(len(rcnlst)):
tmp = rcnlst[ci].split('=')
rcndic[tmp[0]] = tmp[1]
return rcndic | def function[getnii_descr, parameter[fim]]:
constant[
Extracts the custom description header field to dictionary
]
variable[nim] assign[=] call[name[nib].load, parameter[name[fim]]]
variable[hdr] assign[=] name[nim].header
variable[rcnlst] assign[=] call[call[call[name[hdr]][constant[descrip]].item, parameter[]].split, parameter[constant[;]]]
variable[rcndic] assign[=] dictionary[[], []]
if compare[call[name[rcnlst]][constant[0]] equal[==] constant[]] begin[:]
return[name[rcndic]]
for taget[name[ci]] in starred[call[name[range], parameter[call[name[len], parameter[name[rcnlst]]]]]] begin[:]
variable[tmp] assign[=] call[call[name[rcnlst]][name[ci]].split, parameter[constant[=]]]
call[name[rcndic]][call[name[tmp]][constant[0]]] assign[=] call[name[tmp]][constant[1]]
return[name[rcndic]] | keyword[def] identifier[getnii_descr] ( identifier[fim] ):
literal[string]
identifier[nim] = identifier[nib] . identifier[load] ( identifier[fim] )
identifier[hdr] = identifier[nim] . identifier[header]
identifier[rcnlst] = identifier[hdr] [ literal[string] ]. identifier[item] (). identifier[split] ( literal[string] )
identifier[rcndic] ={}
keyword[if] identifier[rcnlst] [ literal[int] ]== literal[string] :
keyword[return] identifier[rcndic]
keyword[for] identifier[ci] keyword[in] identifier[range] ( identifier[len] ( identifier[rcnlst] )):
identifier[tmp] = identifier[rcnlst] [ identifier[ci] ]. identifier[split] ( literal[string] )
identifier[rcndic] [ identifier[tmp] [ literal[int] ]]= identifier[tmp] [ literal[int] ]
keyword[return] identifier[rcndic] | def getnii_descr(fim):
"""
Extracts the custom description header field to dictionary
"""
nim = nib.load(fim)
hdr = nim.header
rcnlst = hdr['descrip'].item().split(';')
rcndic = {}
if rcnlst[0] == '':
# print 'w> no description in the NIfTI header'
return rcndic # depends on [control=['if'], data=[]]
for ci in range(len(rcnlst)):
tmp = rcnlst[ci].split('=')
rcndic[tmp[0]] = tmp[1] # depends on [control=['for'], data=['ci']]
return rcndic |
def parse_info_frags(info_frags):
"""Import an info_frags.txt file and return a dictionary where each key
is a newly formed scaffold and each value is the list of bins and their
origin on the initial scaffolding.
"""
new_scaffolds = {}
with open(info_frags, "r") as info_frags_handle:
current_new_contig = None
for line in info_frags_handle:
if line.startswith(">"):
current_new_contig = str(line[1:-1])
new_scaffolds[current_new_contig] = []
elif line.startswith("init_contig"):
pass
else:
(init_contig, id_frag, orientation, pos_start, pos_end) = str(
line[:-1]
).split("\t")
start = int(pos_start)
end = int(pos_end)
ori = int(orientation)
fragid = int(id_frag)
assert start < end
assert ori in {-1, 1}
new_scaffolds[current_new_contig].append(
[init_contig, fragid, start, end, ori]
)
return new_scaffolds | def function[parse_info_frags, parameter[info_frags]]:
constant[Import an info_frags.txt file and return a dictionary where each key
is a newly formed scaffold and each value is the list of bins and their
origin on the initial scaffolding.
]
variable[new_scaffolds] assign[=] dictionary[[], []]
with call[name[open], parameter[name[info_frags], constant[r]]] begin[:]
variable[current_new_contig] assign[=] constant[None]
for taget[name[line]] in starred[name[info_frags_handle]] begin[:]
if call[name[line].startswith, parameter[constant[>]]] begin[:]
variable[current_new_contig] assign[=] call[name[str], parameter[call[name[line]][<ast.Slice object at 0x7da1b055bee0>]]]
call[name[new_scaffolds]][name[current_new_contig]] assign[=] list[[]]
return[name[new_scaffolds]] | keyword[def] identifier[parse_info_frags] ( identifier[info_frags] ):
literal[string]
identifier[new_scaffolds] ={}
keyword[with] identifier[open] ( identifier[info_frags] , literal[string] ) keyword[as] identifier[info_frags_handle] :
identifier[current_new_contig] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[info_frags_handle] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[current_new_contig] = identifier[str] ( identifier[line] [ literal[int] :- literal[int] ])
identifier[new_scaffolds] [ identifier[current_new_contig] ]=[]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[pass]
keyword[else] :
( identifier[init_contig] , identifier[id_frag] , identifier[orientation] , identifier[pos_start] , identifier[pos_end] )= identifier[str] (
identifier[line] [:- literal[int] ]
). identifier[split] ( literal[string] )
identifier[start] = identifier[int] ( identifier[pos_start] )
identifier[end] = identifier[int] ( identifier[pos_end] )
identifier[ori] = identifier[int] ( identifier[orientation] )
identifier[fragid] = identifier[int] ( identifier[id_frag] )
keyword[assert] identifier[start] < identifier[end]
keyword[assert] identifier[ori] keyword[in] {- literal[int] , literal[int] }
identifier[new_scaffolds] [ identifier[current_new_contig] ]. identifier[append] (
[ identifier[init_contig] , identifier[fragid] , identifier[start] , identifier[end] , identifier[ori] ]
)
keyword[return] identifier[new_scaffolds] | def parse_info_frags(info_frags):
"""Import an info_frags.txt file and return a dictionary where each key
is a newly formed scaffold and each value is the list of bins and their
origin on the initial scaffolding.
"""
new_scaffolds = {}
with open(info_frags, 'r') as info_frags_handle:
current_new_contig = None
for line in info_frags_handle:
if line.startswith('>'):
current_new_contig = str(line[1:-1])
new_scaffolds[current_new_contig] = [] # depends on [control=['if'], data=[]]
elif line.startswith('init_contig'):
pass # depends on [control=['if'], data=[]]
else:
(init_contig, id_frag, orientation, pos_start, pos_end) = str(line[:-1]).split('\t')
start = int(pos_start)
end = int(pos_end)
ori = int(orientation)
fragid = int(id_frag)
assert start < end
assert ori in {-1, 1}
new_scaffolds[current_new_contig].append([init_contig, fragid, start, end, ori]) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['info_frags_handle']]
return new_scaffolds |
def fit(self, X, y, model_filename=None):
"""Fit FastText according to X, y
Parameters:
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
"""
train_file = "temp.train"
X = [x.replace("\n", " ") for x in X]
y = [item[0] for item in y]
y = [_.replace(" ", "-") for _ in y]
lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)]
content = "\n".join(lines)
write(train_file, content)
if model_filename:
self.estimator = fasttext.supervised(train_file, model_filename)
else:
self.estimator = fasttext.supervised(train_file)
os.remove(train_file) | def function[fit, parameter[self, X, y, model_filename]]:
constant[Fit FastText according to X, y
Parameters:
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
]
variable[train_file] assign[=] constant[temp.train]
variable[X] assign[=] <ast.ListComp object at 0x7da18dc99f90>
variable[y] assign[=] <ast.ListComp object at 0x7da18dc99cc0>
variable[y] assign[=] <ast.ListComp object at 0x7da18dc9a980>
variable[lines] assign[=] <ast.ListComp object at 0x7da18dc9b160>
variable[content] assign[=] call[constant[
].join, parameter[name[lines]]]
call[name[write], parameter[name[train_file], name[content]]]
if name[model_filename] begin[:]
name[self].estimator assign[=] call[name[fasttext].supervised, parameter[name[train_file], name[model_filename]]]
call[name[os].remove, parameter[name[train_file]]] | keyword[def] identifier[fit] ( identifier[self] , identifier[X] , identifier[y] , identifier[model_filename] = keyword[None] ):
literal[string]
identifier[train_file] = literal[string]
identifier[X] =[ identifier[x] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[x] keyword[in] identifier[X] ]
identifier[y] =[ identifier[item] [ literal[int] ] keyword[for] identifier[item] keyword[in] identifier[y] ]
identifier[y] =[ identifier[_] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[_] keyword[in] identifier[y] ]
identifier[lines] =[ literal[string] . identifier[format] ( identifier[j] , identifier[i] ) keyword[for] identifier[i] , identifier[j] keyword[in] identifier[zip] ( identifier[X] , identifier[y] )]
identifier[content] = literal[string] . identifier[join] ( identifier[lines] )
identifier[write] ( identifier[train_file] , identifier[content] )
keyword[if] identifier[model_filename] :
identifier[self] . identifier[estimator] = identifier[fasttext] . identifier[supervised] ( identifier[train_file] , identifier[model_filename] )
keyword[else] :
identifier[self] . identifier[estimator] = identifier[fasttext] . identifier[supervised] ( identifier[train_file] )
identifier[os] . identifier[remove] ( identifier[train_file] ) | def fit(self, X, y, model_filename=None):
"""Fit FastText according to X, y
Parameters:
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
"""
train_file = 'temp.train'
X = [x.replace('\n', ' ') for x in X]
y = [item[0] for item in y]
y = [_.replace(' ', '-') for _ in y]
lines = ['__label__{} , {}'.format(j, i) for (i, j) in zip(X, y)]
content = '\n'.join(lines)
write(train_file, content)
if model_filename:
self.estimator = fasttext.supervised(train_file, model_filename) # depends on [control=['if'], data=[]]
else:
self.estimator = fasttext.supervised(train_file)
os.remove(train_file) |
def on_button_release(self, event):
"""Write back changes
If one or more items have been moved, the new position are stored in the corresponding meta data and a signal
notifying the change is emitted.
:param event: The button event
"""
affected_models = {}
for inmotion in self._movable_items:
inmotion.move((event.x, event.y))
rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item,
inmotion.item.handles()[NW])
if isinstance(inmotion.item, StateView):
state_v = inmotion.item
state_m = state_v.model
self.view.canvas.request_update(state_v)
if state_m.get_meta_data_editor()['rel_pos'] != rel_pos:
state_m.set_meta_data_editor('rel_pos', rel_pos)
affected_models[state_m] = ("position", True, state_v)
elif isinstance(inmotion.item, NameView):
state_v = inmotion.item
state_m = self.view.canvas.get_parent(state_v).model
self.view.canvas.request_update(state_v)
if state_m.get_meta_data_editor()['name']['rel_pos'] != rel_pos:
state_m.set_meta_data_editor('name.rel_pos', rel_pos)
affected_models[state_m] = ("name_position", False, state_v)
elif isinstance(inmotion.item, TransitionView):
transition_v = inmotion.item
transition_m = transition_v.model
self.view.canvas.request_update(transition_v)
current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v)
old_waypoints = transition_m.get_meta_data_editor()['waypoints']
if current_waypoints != old_waypoints:
transition_m.set_meta_data_editor('waypoints', current_waypoints)
affected_models[transition_m] = ("waypoints", False, transition_v)
if len(affected_models) == 1:
model = next(iter(affected_models))
change, affects_children, view = affected_models[model]
self.view.graphical_editor.emit('meta_data_changed', model, change, affects_children)
elif len(affected_models) > 1:
# if more than one item has been moved, we need to call the meta_data_changed signal on a common parent
common_parents = None
for change, affects_children, view in affected_models.values():
parents_of_view = set(self.view.canvas.get_ancestors(view))
if common_parents is None:
common_parents = parents_of_view
else:
common_parents = common_parents.intersection(parents_of_view)
assert len(common_parents) > 0, "The selected elements do not have common parent element"
for state_v in common_parents:
# Find most nested state_v
children_of_state_v = self.view.canvas.get_all_children(state_v)
if any(common_parent in children_of_state_v for common_parent in common_parents):
continue
self.view.graphical_editor.emit('meta_data_changed', state_v.model, "positions", True)
break
if not affected_models and self._old_selection is not None:
# The selection is handled differently depending on whether states were moved or not
# If no move operation was performed, we reset the selection to that is was before the button-press event
# and let the state machine selection handle the selection
self.view.unselect_all()
self.view.select_item(self._old_selection)
self.view.handle_new_selection(self._item)
self._move_name_v = False
self._old_selection = None
return super(MoveItemTool, self).on_button_release(event) | def function[on_button_release, parameter[self, event]]:
constant[Write back changes
If one or more items have been moved, the new position are stored in the corresponding meta data and a signal
notifying the change is emitted.
:param event: The button event
]
variable[affected_models] assign[=] dictionary[[], []]
for taget[name[inmotion]] in starred[name[self]._movable_items] begin[:]
call[name[inmotion].move, parameter[tuple[[<ast.Attribute object at 0x7da1b1c7d9f0>, <ast.Attribute object at 0x7da1b1c7ffa0>]]]]
variable[rel_pos] assign[=] call[name[gap_helper].calc_rel_pos_to_parent, parameter[name[self].view.canvas, name[inmotion].item, call[call[name[inmotion].item.handles, parameter[]]][name[NW]]]]
if call[name[isinstance], parameter[name[inmotion].item, name[StateView]]] begin[:]
variable[state_v] assign[=] name[inmotion].item
variable[state_m] assign[=] name[state_v].model
call[name[self].view.canvas.request_update, parameter[name[state_v]]]
if compare[call[call[name[state_m].get_meta_data_editor, parameter[]]][constant[rel_pos]] not_equal[!=] name[rel_pos]] begin[:]
call[name[state_m].set_meta_data_editor, parameter[constant[rel_pos], name[rel_pos]]]
call[name[affected_models]][name[state_m]] assign[=] tuple[[<ast.Constant object at 0x7da1b1c7c9d0>, <ast.Constant object at 0x7da1b1c7c040>, <ast.Name object at 0x7da1b1c7df00>]]
if compare[call[name[len], parameter[name[affected_models]]] equal[==] constant[1]] begin[:]
variable[model] assign[=] call[name[next], parameter[call[name[iter], parameter[name[affected_models]]]]]
<ast.Tuple object at 0x7da18bc73610> assign[=] call[name[affected_models]][name[model]]
call[name[self].view.graphical_editor.emit, parameter[constant[meta_data_changed], name[model], name[change], name[affects_children]]]
if <ast.BoolOp object at 0x7da1b1ad7a00> begin[:]
call[name[self].view.unselect_all, parameter[]]
call[name[self].view.select_item, parameter[name[self]._old_selection]]
call[name[self].view.handle_new_selection, parameter[name[self]._item]]
name[self]._move_name_v assign[=] constant[False]
name[self]._old_selection assign[=] constant[None]
return[call[call[name[super], parameter[name[MoveItemTool], name[self]]].on_button_release, parameter[name[event]]]] | keyword[def] identifier[on_button_release] ( identifier[self] , identifier[event] ):
literal[string]
identifier[affected_models] ={}
keyword[for] identifier[inmotion] keyword[in] identifier[self] . identifier[_movable_items] :
identifier[inmotion] . identifier[move] (( identifier[event] . identifier[x] , identifier[event] . identifier[y] ))
identifier[rel_pos] = identifier[gap_helper] . identifier[calc_rel_pos_to_parent] ( identifier[self] . identifier[view] . identifier[canvas] , identifier[inmotion] . identifier[item] ,
identifier[inmotion] . identifier[item] . identifier[handles] ()[ identifier[NW] ])
keyword[if] identifier[isinstance] ( identifier[inmotion] . identifier[item] , identifier[StateView] ):
identifier[state_v] = identifier[inmotion] . identifier[item]
identifier[state_m] = identifier[state_v] . identifier[model]
identifier[self] . identifier[view] . identifier[canvas] . identifier[request_update] ( identifier[state_v] )
keyword[if] identifier[state_m] . identifier[get_meta_data_editor] ()[ literal[string] ]!= identifier[rel_pos] :
identifier[state_m] . identifier[set_meta_data_editor] ( literal[string] , identifier[rel_pos] )
identifier[affected_models] [ identifier[state_m] ]=( literal[string] , keyword[True] , identifier[state_v] )
keyword[elif] identifier[isinstance] ( identifier[inmotion] . identifier[item] , identifier[NameView] ):
identifier[state_v] = identifier[inmotion] . identifier[item]
identifier[state_m] = identifier[self] . identifier[view] . identifier[canvas] . identifier[get_parent] ( identifier[state_v] ). identifier[model]
identifier[self] . identifier[view] . identifier[canvas] . identifier[request_update] ( identifier[state_v] )
keyword[if] identifier[state_m] . identifier[get_meta_data_editor] ()[ literal[string] ][ literal[string] ]!= identifier[rel_pos] :
identifier[state_m] . identifier[set_meta_data_editor] ( literal[string] , identifier[rel_pos] )
identifier[affected_models] [ identifier[state_m] ]=( literal[string] , keyword[False] , identifier[state_v] )
keyword[elif] identifier[isinstance] ( identifier[inmotion] . identifier[item] , identifier[TransitionView] ):
identifier[transition_v] = identifier[inmotion] . identifier[item]
identifier[transition_m] = identifier[transition_v] . identifier[model]
identifier[self] . identifier[view] . identifier[canvas] . identifier[request_update] ( identifier[transition_v] )
identifier[current_waypoints] = identifier[gap_helper] . identifier[get_relative_positions_of_waypoints] ( identifier[transition_v] )
identifier[old_waypoints] = identifier[transition_m] . identifier[get_meta_data_editor] ()[ literal[string] ]
keyword[if] identifier[current_waypoints] != identifier[old_waypoints] :
identifier[transition_m] . identifier[set_meta_data_editor] ( literal[string] , identifier[current_waypoints] )
identifier[affected_models] [ identifier[transition_m] ]=( literal[string] , keyword[False] , identifier[transition_v] )
keyword[if] identifier[len] ( identifier[affected_models] )== literal[int] :
identifier[model] = identifier[next] ( identifier[iter] ( identifier[affected_models] ))
identifier[change] , identifier[affects_children] , identifier[view] = identifier[affected_models] [ identifier[model] ]
identifier[self] . identifier[view] . identifier[graphical_editor] . identifier[emit] ( literal[string] , identifier[model] , identifier[change] , identifier[affects_children] )
keyword[elif] identifier[len] ( identifier[affected_models] )> literal[int] :
identifier[common_parents] = keyword[None]
keyword[for] identifier[change] , identifier[affects_children] , identifier[view] keyword[in] identifier[affected_models] . identifier[values] ():
identifier[parents_of_view] = identifier[set] ( identifier[self] . identifier[view] . identifier[canvas] . identifier[get_ancestors] ( identifier[view] ))
keyword[if] identifier[common_parents] keyword[is] keyword[None] :
identifier[common_parents] = identifier[parents_of_view]
keyword[else] :
identifier[common_parents] = identifier[common_parents] . identifier[intersection] ( identifier[parents_of_view] )
keyword[assert] identifier[len] ( identifier[common_parents] )> literal[int] , literal[string]
keyword[for] identifier[state_v] keyword[in] identifier[common_parents] :
identifier[children_of_state_v] = identifier[self] . identifier[view] . identifier[canvas] . identifier[get_all_children] ( identifier[state_v] )
keyword[if] identifier[any] ( identifier[common_parent] keyword[in] identifier[children_of_state_v] keyword[for] identifier[common_parent] keyword[in] identifier[common_parents] ):
keyword[continue]
identifier[self] . identifier[view] . identifier[graphical_editor] . identifier[emit] ( literal[string] , identifier[state_v] . identifier[model] , literal[string] , keyword[True] )
keyword[break]
keyword[if] keyword[not] identifier[affected_models] keyword[and] identifier[self] . identifier[_old_selection] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[view] . identifier[unselect_all] ()
identifier[self] . identifier[view] . identifier[select_item] ( identifier[self] . identifier[_old_selection] )
identifier[self] . identifier[view] . identifier[handle_new_selection] ( identifier[self] . identifier[_item] )
identifier[self] . identifier[_move_name_v] = keyword[False]
identifier[self] . identifier[_old_selection] = keyword[None]
keyword[return] identifier[super] ( identifier[MoveItemTool] , identifier[self] ). identifier[on_button_release] ( identifier[event] ) | def on_button_release(self, event):
"""Write back changes
If one or more items have been moved, the new position are stored in the corresponding meta data and a signal
notifying the change is emitted.
:param event: The button event
"""
affected_models = {}
for inmotion in self._movable_items:
inmotion.move((event.x, event.y))
rel_pos = gap_helper.calc_rel_pos_to_parent(self.view.canvas, inmotion.item, inmotion.item.handles()[NW])
if isinstance(inmotion.item, StateView):
state_v = inmotion.item
state_m = state_v.model
self.view.canvas.request_update(state_v)
if state_m.get_meta_data_editor()['rel_pos'] != rel_pos:
state_m.set_meta_data_editor('rel_pos', rel_pos)
affected_models[state_m] = ('position', True, state_v) # depends on [control=['if'], data=['rel_pos']] # depends on [control=['if'], data=[]]
elif isinstance(inmotion.item, NameView):
state_v = inmotion.item
state_m = self.view.canvas.get_parent(state_v).model
self.view.canvas.request_update(state_v)
if state_m.get_meta_data_editor()['name']['rel_pos'] != rel_pos:
state_m.set_meta_data_editor('name.rel_pos', rel_pos)
affected_models[state_m] = ('name_position', False, state_v) # depends on [control=['if'], data=['rel_pos']] # depends on [control=['if'], data=[]]
elif isinstance(inmotion.item, TransitionView):
transition_v = inmotion.item
transition_m = transition_v.model
self.view.canvas.request_update(transition_v)
current_waypoints = gap_helper.get_relative_positions_of_waypoints(transition_v)
old_waypoints = transition_m.get_meta_data_editor()['waypoints']
if current_waypoints != old_waypoints:
transition_m.set_meta_data_editor('waypoints', current_waypoints)
affected_models[transition_m] = ('waypoints', False, transition_v) # depends on [control=['if'], data=['current_waypoints']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['inmotion']]
if len(affected_models) == 1:
model = next(iter(affected_models))
(change, affects_children, view) = affected_models[model]
self.view.graphical_editor.emit('meta_data_changed', model, change, affects_children) # depends on [control=['if'], data=[]]
elif len(affected_models) > 1:
# if more than one item has been moved, we need to call the meta_data_changed signal on a common parent
common_parents = None
for (change, affects_children, view) in affected_models.values():
parents_of_view = set(self.view.canvas.get_ancestors(view))
if common_parents is None:
common_parents = parents_of_view # depends on [control=['if'], data=['common_parents']]
else:
common_parents = common_parents.intersection(parents_of_view) # depends on [control=['for'], data=[]]
assert len(common_parents) > 0, 'The selected elements do not have common parent element'
for state_v in common_parents:
# Find most nested state_v
children_of_state_v = self.view.canvas.get_all_children(state_v)
if any((common_parent in children_of_state_v for common_parent in common_parents)):
continue # depends on [control=['if'], data=[]]
self.view.graphical_editor.emit('meta_data_changed', state_v.model, 'positions', True)
break # depends on [control=['for'], data=['state_v']] # depends on [control=['if'], data=[]]
if not affected_models and self._old_selection is not None:
# The selection is handled differently depending on whether states were moved or not
# If no move operation was performed, we reset the selection to that is was before the button-press event
# and let the state machine selection handle the selection
self.view.unselect_all()
self.view.select_item(self._old_selection)
self.view.handle_new_selection(self._item) # depends on [control=['if'], data=[]]
self._move_name_v = False
self._old_selection = None
return super(MoveItemTool, self).on_button_release(event) |
def fetchmanyDict(self, size=None):
"""Fetch several rows as a list of dictionaries. Deprecated:
Use fetchmany() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchmanyDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchmany(size) | def function[fetchmanyDict, parameter[self, size]]:
constant[Fetch several rows as a list of dictionaries. Deprecated:
Use fetchmany() instead. Will be removed in 1.3.]
from relative_module[warnings] import module[warn]
call[name[warn], parameter[constant[fetchmanyDict() is non-standard and will be removed in 1.3], name[DeprecationWarning], constant[2]]]
return[call[name[self].fetchmany, parameter[name[size]]]] | keyword[def] identifier[fetchmanyDict] ( identifier[self] , identifier[size] = keyword[None] ):
literal[string]
keyword[from] identifier[warnings] keyword[import] identifier[warn]
identifier[warn] ( literal[string] ,
identifier[DeprecationWarning] , literal[int] )
keyword[return] identifier[self] . identifier[fetchmany] ( identifier[size] ) | def fetchmanyDict(self, size=None):
"""Fetch several rows as a list of dictionaries. Deprecated:
Use fetchmany() instead. Will be removed in 1.3."""
from warnings import warn
warn('fetchmanyDict() is non-standard and will be removed in 1.3', DeprecationWarning, 2)
return self.fetchmany(size) |
def process_files():
"""
Process files with a single progress bar
"""
with enlighten.Counter(total=100, desc='Simple', unit='ticks') as pbar:
for num in range(100): # pylint: disable=unused-variable
time.sleep(0.05)
pbar.update() | def function[process_files, parameter[]]:
constant[
Process files with a single progress bar
]
with call[name[enlighten].Counter, parameter[]] begin[:]
for taget[name[num]] in starred[call[name[range], parameter[constant[100]]]] begin[:]
call[name[time].sleep, parameter[constant[0.05]]]
call[name[pbar].update, parameter[]] | keyword[def] identifier[process_files] ():
literal[string]
keyword[with] identifier[enlighten] . identifier[Counter] ( identifier[total] = literal[int] , identifier[desc] = literal[string] , identifier[unit] = literal[string] ) keyword[as] identifier[pbar] :
keyword[for] identifier[num] keyword[in] identifier[range] ( literal[int] ):
identifier[time] . identifier[sleep] ( literal[int] )
identifier[pbar] . identifier[update] () | def process_files():
"""
Process files with a single progress bar
"""
with enlighten.Counter(total=100, desc='Simple', unit='ticks') as pbar:
for num in range(100): # pylint: disable=unused-variable
time.sleep(0.05)
pbar.update() # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['pbar']] |
def _exclude_by_filter(self, frame, filename):
'''
:param str filename:
The filename to filter.
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
'''
try:
return self._exclude_by_filter_cache[filename]
except KeyError:
cache = self._exclude_by_filter_cache
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_file(filename)
# pydevd files are always filtered out
if self.get_file_type(abs_real_path_and_basename) == self.PYDEV_FILE:
cache[filename] = True
else:
module_name = None
if self._files_filtering.require_module:
module_name = frame.f_globals.get('__name__')
cache[filename] = self._files_filtering.exclude_by_filter(filename, module_name)
return cache[filename] | def function[_exclude_by_filter, parameter[self, frame, filename]]:
constant[
:param str filename:
The filename to filter.
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
]
<ast.Try object at 0x7da18fe91300> | keyword[def] identifier[_exclude_by_filter] ( identifier[self] , identifier[frame] , identifier[filename] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_exclude_by_filter_cache] [ identifier[filename] ]
keyword[except] identifier[KeyError] :
identifier[cache] = identifier[self] . identifier[_exclude_by_filter_cache]
identifier[abs_real_path_and_basename] = identifier[get_abs_path_real_path_and_base_from_file] ( identifier[filename] )
keyword[if] identifier[self] . identifier[get_file_type] ( identifier[abs_real_path_and_basename] )== identifier[self] . identifier[PYDEV_FILE] :
identifier[cache] [ identifier[filename] ]= keyword[True]
keyword[else] :
identifier[module_name] = keyword[None]
keyword[if] identifier[self] . identifier[_files_filtering] . identifier[require_module] :
identifier[module_name] = identifier[frame] . identifier[f_globals] . identifier[get] ( literal[string] )
identifier[cache] [ identifier[filename] ]= identifier[self] . identifier[_files_filtering] . identifier[exclude_by_filter] ( identifier[filename] , identifier[module_name] )
keyword[return] identifier[cache] [ identifier[filename] ] | def _exclude_by_filter(self, frame, filename):
"""
:param str filename:
The filename to filter.
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
"""
try:
return self._exclude_by_filter_cache[filename] # depends on [control=['try'], data=[]]
except KeyError:
cache = self._exclude_by_filter_cache
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_file(filename)
# pydevd files are always filtered out
if self.get_file_type(abs_real_path_and_basename) == self.PYDEV_FILE:
cache[filename] = True # depends on [control=['if'], data=[]]
else:
module_name = None
if self._files_filtering.require_module:
module_name = frame.f_globals.get('__name__') # depends on [control=['if'], data=[]]
cache[filename] = self._files_filtering.exclude_by_filter(filename, module_name)
return cache[filename] # depends on [control=['except'], data=[]] |
def find(self, search_term: str) -> List[Commodity]:
""" Searches for security by part of the name """
query = (
self.query
.filter(Commodity.mnemonic.like('%' + search_term + '%') |
Commodity.fullname.like('%' + search_term + '%'))
)
return query.all() | def function[find, parameter[self, search_term]]:
constant[ Searches for security by part of the name ]
variable[query] assign[=] call[name[self].query.filter, parameter[binary_operation[call[name[Commodity].mnemonic.like, parameter[binary_operation[binary_operation[constant[%] + name[search_term]] + constant[%]]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[Commodity].fullname.like, parameter[binary_operation[binary_operation[constant[%] + name[search_term]] + constant[%]]]]]]]
return[call[name[query].all, parameter[]]] | keyword[def] identifier[find] ( identifier[self] , identifier[search_term] : identifier[str] )-> identifier[List] [ identifier[Commodity] ]:
literal[string]
identifier[query] =(
identifier[self] . identifier[query]
. identifier[filter] ( identifier[Commodity] . identifier[mnemonic] . identifier[like] ( literal[string] + identifier[search_term] + literal[string] )|
identifier[Commodity] . identifier[fullname] . identifier[like] ( literal[string] + identifier[search_term] + literal[string] ))
)
keyword[return] identifier[query] . identifier[all] () | def find(self, search_term: str) -> List[Commodity]:
""" Searches for security by part of the name """
query = self.query.filter(Commodity.mnemonic.like('%' + search_term + '%') | Commodity.fullname.like('%' + search_term + '%'))
return query.all() |
def Hvap(self, T):
r'''Method to calculate enthalpy of vaporization for a pure fluid from
an equation of state, without iteration.
.. math::
\frac{dP^{sat}}{dT}=\frac{\Delta H_{vap}}{T(V_g - V_l)}
Results above the critical temperature are meaningless. A first-order
polynomial is used to extrapolate under 0.32 Tc; however, there is
normally not a volume solution to the EOS which can produce that
low of a pressure.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
Hvap : float
Increase in enthalpy needed for vaporization of liquid phase along
the saturation line, [J/mol]
Notes
-----
Calculates vapor pressure and its derivative with `Psat` and `dPsat_dT`
as well as molar volumes of the saturation liquid and vapor phase in
the process.
Very near the critical point this provides unrealistic results due to
`Psat`'s polynomials being insufficiently accurate.
References
----------
.. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
'''
Psat = self.Psat(T)
dPsat_dT = self.dPsat_dT(T)
a_alpha = self.a_alpha_and_derivatives(T, full=False)
Vs = self.volume_solutions(T, Psat, self.b, self.delta, self.epsilon, a_alpha)
# Assume we can safely take the Vmax as gas, Vmin as l on the saturation line
Vs = [i.real for i in Vs]
V_l, V_g = min(Vs), max(Vs)
return dPsat_dT*T*(V_g-V_l) | def function[Hvap, parameter[self, T]]:
constant[Method to calculate enthalpy of vaporization for a pure fluid from
an equation of state, without iteration.
.. math::
\frac{dP^{sat}}{dT}=\frac{\Delta H_{vap}}{T(V_g - V_l)}
Results above the critical temperature are meaningless. A first-order
polynomial is used to extrapolate under 0.32 Tc; however, there is
normally not a volume solution to the EOS which can produce that
low of a pressure.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
Hvap : float
Increase in enthalpy needed for vaporization of liquid phase along
the saturation line, [J/mol]
Notes
-----
Calculates vapor pressure and its derivative with `Psat` and `dPsat_dT`
as well as molar volumes of the saturation liquid and vapor phase in
the process.
Very near the critical point this provides unrealistic results due to
`Psat`'s polynomials being insufficiently accurate.
References
----------
.. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
]
variable[Psat] assign[=] call[name[self].Psat, parameter[name[T]]]
variable[dPsat_dT] assign[=] call[name[self].dPsat_dT, parameter[name[T]]]
variable[a_alpha] assign[=] call[name[self].a_alpha_and_derivatives, parameter[name[T]]]
variable[Vs] assign[=] call[name[self].volume_solutions, parameter[name[T], name[Psat], name[self].b, name[self].delta, name[self].epsilon, name[a_alpha]]]
variable[Vs] assign[=] <ast.ListComp object at 0x7da2043461d0>
<ast.Tuple object at 0x7da204346050> assign[=] tuple[[<ast.Call object at 0x7da2043469b0>, <ast.Call object at 0x7da204344a00>]]
return[binary_operation[binary_operation[name[dPsat_dT] * name[T]] * binary_operation[name[V_g] - name[V_l]]]] | keyword[def] identifier[Hvap] ( identifier[self] , identifier[T] ):
literal[string]
identifier[Psat] = identifier[self] . identifier[Psat] ( identifier[T] )
identifier[dPsat_dT] = identifier[self] . identifier[dPsat_dT] ( identifier[T] )
identifier[a_alpha] = identifier[self] . identifier[a_alpha_and_derivatives] ( identifier[T] , identifier[full] = keyword[False] )
identifier[Vs] = identifier[self] . identifier[volume_solutions] ( identifier[T] , identifier[Psat] , identifier[self] . identifier[b] , identifier[self] . identifier[delta] , identifier[self] . identifier[epsilon] , identifier[a_alpha] )
identifier[Vs] =[ identifier[i] . identifier[real] keyword[for] identifier[i] keyword[in] identifier[Vs] ]
identifier[V_l] , identifier[V_g] = identifier[min] ( identifier[Vs] ), identifier[max] ( identifier[Vs] )
keyword[return] identifier[dPsat_dT] * identifier[T] *( identifier[V_g] - identifier[V_l] ) | def Hvap(self, T):
"""Method to calculate enthalpy of vaporization for a pure fluid from
an equation of state, without iteration.
.. math::
\\frac{dP^{sat}}{dT}=\\frac{\\Delta H_{vap}}{T(V_g - V_l)}
Results above the critical temperature are meaningless. A first-order
polynomial is used to extrapolate under 0.32 Tc; however, there is
normally not a volume solution to the EOS which can produce that
low of a pressure.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
Hvap : float
Increase in enthalpy needed for vaporization of liquid phase along
the saturation line, [J/mol]
Notes
-----
Calculates vapor pressure and its derivative with `Psat` and `dPsat_dT`
as well as molar volumes of the saturation liquid and vapor phase in
the process.
Very near the critical point this provides unrealistic results due to
`Psat`'s polynomials being insufficiently accurate.
References
----------
.. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
"""
Psat = self.Psat(T)
dPsat_dT = self.dPsat_dT(T)
a_alpha = self.a_alpha_and_derivatives(T, full=False)
Vs = self.volume_solutions(T, Psat, self.b, self.delta, self.epsilon, a_alpha)
# Assume we can safely take the Vmax as gas, Vmin as l on the saturation line
Vs = [i.real for i in Vs]
(V_l, V_g) = (min(Vs), max(Vs))
return dPsat_dT * T * (V_g - V_l) |
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
# Register current_profile
app.context_processor(lambda: dict(
current_userprofile=current_userprofile))
app.extensions['invenio-userprofiles'] = self | def function[init_app, parameter[self, app]]:
constant[Flask application initialization.]
call[name[self].init_config, parameter[name[app]]]
call[name[app].context_processor, parameter[<ast.Lambda object at 0x7da18f810c40>]]
call[name[app].extensions][constant[invenio-userprofiles]] assign[=] name[self] | keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ):
literal[string]
identifier[self] . identifier[init_config] ( identifier[app] )
identifier[app] . identifier[context_processor] ( keyword[lambda] : identifier[dict] (
identifier[current_userprofile] = identifier[current_userprofile] ))
identifier[app] . identifier[extensions] [ literal[string] ]= identifier[self] | def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
# Register current_profile
app.context_processor(lambda : dict(current_userprofile=current_userprofile))
app.extensions['invenio-userprofiles'] = self |
def render_in_browser(self, **kwargs):
"""Render the graph, open it in your browser with black magic"""
try:
from lxml.html import open_in_browser
except ImportError:
raise ImportError('You must install lxml to use render in browser')
kwargs.setdefault('force_uri_protocol', 'https')
open_in_browser(self.render_tree(**kwargs), encoding='utf-8') | def function[render_in_browser, parameter[self]]:
constant[Render the graph, open it in your browser with black magic]
<ast.Try object at 0x7da2044c3f40>
call[name[kwargs].setdefault, parameter[constant[force_uri_protocol], constant[https]]]
call[name[open_in_browser], parameter[call[name[self].render_tree, parameter[]]]] | keyword[def] identifier[render_in_browser] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[from] identifier[lxml] . identifier[html] keyword[import] identifier[open_in_browser]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[ImportError] ( literal[string] )
identifier[kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[open_in_browser] ( identifier[self] . identifier[render_tree] (** identifier[kwargs] ), identifier[encoding] = literal[string] ) | def render_in_browser(self, **kwargs):
"""Render the graph, open it in your browser with black magic"""
try:
from lxml.html import open_in_browser # depends on [control=['try'], data=[]]
except ImportError:
raise ImportError('You must install lxml to use render in browser') # depends on [control=['except'], data=[]]
kwargs.setdefault('force_uri_protocol', 'https')
open_in_browser(self.render_tree(**kwargs), encoding='utf-8') |
def to_cjson(self, buf=None, **kwargs):
"""Write a cjson file or return dictionary.
The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.
Args:
buf (str): If it is a filepath, the data is written to
filepath. If it is None, a dictionary with the cjson
information is returned.
kwargs: The keyword arguments are passed into the
``dump`` function of the
`json library <https://docs.python.org/3/library/json.html>`_.
Returns:
dict:
"""
cjson_dict = {'chemical json': 0}
cjson_dict['atoms'] = {}
atomic_number = constants.elements['atomic_number'].to_dict()
cjson_dict['atoms'] = {'elements': {}}
cjson_dict['atoms']['elements']['number'] = [
int(atomic_number[x]) for x in self['atom']]
cjson_dict['atoms']['coords'] = {}
coords = self.loc[:, ['x', 'y', 'z']].values.reshape(len(self) * 3)
cjson_dict['atoms']['coords']['3d'] = [float(x) for x in coords]
bonds = []
bond_dict = self.get_bonds()
for i in bond_dict:
for b in bond_dict[i]:
bonds += [int(i), int(b)]
bond_dict[b].remove(i)
cjson_dict['bonds'] = {'connections': {}}
cjson_dict['bonds']['connections']['index'] = bonds
if buf is not None:
with open(buf, mode='w') as f:
f.write(json.dumps(cjson_dict, **kwargs))
else:
return cjson_dict | def function[to_cjson, parameter[self, buf]]:
constant[Write a cjson file or return dictionary.
The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.
Args:
buf (str): If it is a filepath, the data is written to
filepath. If it is None, a dictionary with the cjson
information is returned.
kwargs: The keyword arguments are passed into the
``dump`` function of the
`json library <https://docs.python.org/3/library/json.html>`_.
Returns:
dict:
]
variable[cjson_dict] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c5f90>], [<ast.Constant object at 0x7da20c6c71c0>]]
call[name[cjson_dict]][constant[atoms]] assign[=] dictionary[[], []]
variable[atomic_number] assign[=] call[call[name[constants].elements][constant[atomic_number]].to_dict, parameter[]]
call[name[cjson_dict]][constant[atoms]] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c66b0>], [<ast.Dict object at 0x7da20c6c5ed0>]]
call[call[call[name[cjson_dict]][constant[atoms]]][constant[elements]]][constant[number]] assign[=] <ast.ListComp object at 0x7da18f00ded0>
call[call[name[cjson_dict]][constant[atoms]]][constant[coords]] assign[=] dictionary[[], []]
variable[coords] assign[=] call[call[name[self].loc][tuple[[<ast.Slice object at 0x7da18f00d240>, <ast.List object at 0x7da18f00fc40>]]].values.reshape, parameter[binary_operation[call[name[len], parameter[name[self]]] * constant[3]]]]
call[call[call[name[cjson_dict]][constant[atoms]]][constant[coords]]][constant[3d]] assign[=] <ast.ListComp object at 0x7da18f00ee90>
variable[bonds] assign[=] list[[]]
variable[bond_dict] assign[=] call[name[self].get_bonds, parameter[]]
for taget[name[i]] in starred[name[bond_dict]] begin[:]
for taget[name[b]] in starred[call[name[bond_dict]][name[i]]] begin[:]
<ast.AugAssign object at 0x7da20c6c4d90>
call[call[name[bond_dict]][name[b]].remove, parameter[name[i]]]
call[name[cjson_dict]][constant[bonds]] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c5ae0>], [<ast.Dict object at 0x7da20c6c6a40>]]
call[call[call[name[cjson_dict]][constant[bonds]]][constant[connections]]][constant[index]] assign[=] name[bonds]
if compare[name[buf] is_not constant[None]] begin[:]
with call[name[open], parameter[name[buf]]] begin[:]
call[name[f].write, parameter[call[name[json].dumps, parameter[name[cjson_dict]]]]] | keyword[def] identifier[to_cjson] ( identifier[self] , identifier[buf] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[cjson_dict] ={ literal[string] : literal[int] }
identifier[cjson_dict] [ literal[string] ]={}
identifier[atomic_number] = identifier[constants] . identifier[elements] [ literal[string] ]. identifier[to_dict] ()
identifier[cjson_dict] [ literal[string] ]={ literal[string] :{}}
identifier[cjson_dict] [ literal[string] ][ literal[string] ][ literal[string] ]=[
identifier[int] ( identifier[atomic_number] [ identifier[x] ]) keyword[for] identifier[x] keyword[in] identifier[self] [ literal[string] ]]
identifier[cjson_dict] [ literal[string] ][ literal[string] ]={}
identifier[coords] = identifier[self] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] ]]. identifier[values] . identifier[reshape] ( identifier[len] ( identifier[self] )* literal[int] )
identifier[cjson_dict] [ literal[string] ][ literal[string] ][ literal[string] ]=[ identifier[float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[coords] ]
identifier[bonds] =[]
identifier[bond_dict] = identifier[self] . identifier[get_bonds] ()
keyword[for] identifier[i] keyword[in] identifier[bond_dict] :
keyword[for] identifier[b] keyword[in] identifier[bond_dict] [ identifier[i] ]:
identifier[bonds] +=[ identifier[int] ( identifier[i] ), identifier[int] ( identifier[b] )]
identifier[bond_dict] [ identifier[b] ]. identifier[remove] ( identifier[i] )
identifier[cjson_dict] [ literal[string] ]={ literal[string] :{}}
identifier[cjson_dict] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[bonds]
keyword[if] identifier[buf] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[open] ( identifier[buf] , identifier[mode] = literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[cjson_dict] ,** identifier[kwargs] ))
keyword[else] :
keyword[return] identifier[cjson_dict] | def to_cjson(self, buf=None, **kwargs):
"""Write a cjson file or return dictionary.
The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.
Args:
buf (str): If it is a filepath, the data is written to
filepath. If it is None, a dictionary with the cjson
information is returned.
kwargs: The keyword arguments are passed into the
``dump`` function of the
`json library <https://docs.python.org/3/library/json.html>`_.
Returns:
dict:
"""
cjson_dict = {'chemical json': 0}
cjson_dict['atoms'] = {}
atomic_number = constants.elements['atomic_number'].to_dict()
cjson_dict['atoms'] = {'elements': {}}
cjson_dict['atoms']['elements']['number'] = [int(atomic_number[x]) for x in self['atom']]
cjson_dict['atoms']['coords'] = {}
coords = self.loc[:, ['x', 'y', 'z']].values.reshape(len(self) * 3)
cjson_dict['atoms']['coords']['3d'] = [float(x) for x in coords]
bonds = []
bond_dict = self.get_bonds()
for i in bond_dict:
for b in bond_dict[i]:
bonds += [int(i), int(b)]
bond_dict[b].remove(i) # depends on [control=['for'], data=['b']] # depends on [control=['for'], data=['i']]
cjson_dict['bonds'] = {'connections': {}}
cjson_dict['bonds']['connections']['index'] = bonds
if buf is not None:
with open(buf, mode='w') as f:
f.write(json.dumps(cjson_dict, **kwargs)) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=['buf']]
else:
return cjson_dict |
def update_many(self, filter, update, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_many
"""
self._arctic_lib.check_quota()
return self._collection.update_many(filter, update, **kwargs) | def function[update_many, parameter[self, filter, update]]:
constant[
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_many
]
call[name[self]._arctic_lib.check_quota, parameter[]]
return[call[name[self]._collection.update_many, parameter[name[filter], name[update]]]] | keyword[def] identifier[update_many] ( identifier[self] , identifier[filter] , identifier[update] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_arctic_lib] . identifier[check_quota] ()
keyword[return] identifier[self] . identifier[_collection] . identifier[update_many] ( identifier[filter] , identifier[update] ,** identifier[kwargs] ) | def update_many(self, filter, update, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_many
"""
self._arctic_lib.check_quota()
return self._collection.update_many(filter, update, **kwargs) |
def import_from_grammar_into_namespace(grammar, namespace, aliases):
"""Returns all rules and terminals of grammar, prepended
with a 'namespace' prefix, except for those which are aliased.
"""
imported_terms = dict(grammar.term_defs)
imported_rules = {n:(n,deepcopy(t),o) for n,t,o in grammar.rule_defs}
term_defs = []
rule_defs = []
def rule_dependencies(symbol):
if symbol.type != 'RULE':
return []
try:
_, tree, _ = imported_rules[symbol]
except KeyError:
raise GrammarError("Missing symbol '%s' in grammar %s" % (symbol, namespace))
return tree.scan_values(lambda x: x.type in ('RULE', 'TERMINAL'))
def get_namespace_name(name):
try:
return aliases[name].value
except KeyError:
if name[0] == '_':
return '_%s__%s' % (namespace, name[1:])
return '%s__%s' % (namespace, name)
to_import = list(bfs(aliases, rule_dependencies))
for symbol in to_import:
if symbol.type == 'TERMINAL':
term_defs.append([get_namespace_name(symbol), imported_terms[symbol]])
else:
assert symbol.type == 'RULE'
rule = imported_rules[symbol]
for t in rule[1].iter_subtrees():
for i, c in enumerate(t.children):
if isinstance(c, Token) and c.type in ('RULE', 'TERMINAL'):
t.children[i] = Token(c.type, get_namespace_name(c))
rule_defs.append((get_namespace_name(symbol), rule[1], rule[2]))
return term_defs, rule_defs | def function[import_from_grammar_into_namespace, parameter[grammar, namespace, aliases]]:
constant[Returns all rules and terminals of grammar, prepended
with a 'namespace' prefix, except for those which are aliased.
]
variable[imported_terms] assign[=] call[name[dict], parameter[name[grammar].term_defs]]
variable[imported_rules] assign[=] <ast.DictComp object at 0x7da207f02a70>
variable[term_defs] assign[=] list[[]]
variable[rule_defs] assign[=] list[[]]
def function[rule_dependencies, parameter[symbol]]:
if compare[name[symbol].type not_equal[!=] constant[RULE]] begin[:]
return[list[[]]]
<ast.Try object at 0x7da207f01fc0>
return[call[name[tree].scan_values, parameter[<ast.Lambda object at 0x7da207f02b60>]]]
def function[get_namespace_name, parameter[name]]:
<ast.Try object at 0x7da207f03700>
variable[to_import] assign[=] call[name[list], parameter[call[name[bfs], parameter[name[aliases], name[rule_dependencies]]]]]
for taget[name[symbol]] in starred[name[to_import]] begin[:]
if compare[name[symbol].type equal[==] constant[TERMINAL]] begin[:]
call[name[term_defs].append, parameter[list[[<ast.Call object at 0x7da207f03c10>, <ast.Subscript object at 0x7da207f02950>]]]]
return[tuple[[<ast.Name object at 0x7da18ede54e0>, <ast.Name object at 0x7da18ede6410>]]] | keyword[def] identifier[import_from_grammar_into_namespace] ( identifier[grammar] , identifier[namespace] , identifier[aliases] ):
literal[string]
identifier[imported_terms] = identifier[dict] ( identifier[grammar] . identifier[term_defs] )
identifier[imported_rules] ={ identifier[n] :( identifier[n] , identifier[deepcopy] ( identifier[t] ), identifier[o] ) keyword[for] identifier[n] , identifier[t] , identifier[o] keyword[in] identifier[grammar] . identifier[rule_defs] }
identifier[term_defs] =[]
identifier[rule_defs] =[]
keyword[def] identifier[rule_dependencies] ( identifier[symbol] ):
keyword[if] identifier[symbol] . identifier[type] != literal[string] :
keyword[return] []
keyword[try] :
identifier[_] , identifier[tree] , identifier[_] = identifier[imported_rules] [ identifier[symbol] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[GrammarError] ( literal[string] %( identifier[symbol] , identifier[namespace] ))
keyword[return] identifier[tree] . identifier[scan_values] ( keyword[lambda] identifier[x] : identifier[x] . identifier[type] keyword[in] ( literal[string] , literal[string] ))
keyword[def] identifier[get_namespace_name] ( identifier[name] ):
keyword[try] :
keyword[return] identifier[aliases] [ identifier[name] ]. identifier[value]
keyword[except] identifier[KeyError] :
keyword[if] identifier[name] [ literal[int] ]== literal[string] :
keyword[return] literal[string] %( identifier[namespace] , identifier[name] [ literal[int] :])
keyword[return] literal[string] %( identifier[namespace] , identifier[name] )
identifier[to_import] = identifier[list] ( identifier[bfs] ( identifier[aliases] , identifier[rule_dependencies] ))
keyword[for] identifier[symbol] keyword[in] identifier[to_import] :
keyword[if] identifier[symbol] . identifier[type] == literal[string] :
identifier[term_defs] . identifier[append] ([ identifier[get_namespace_name] ( identifier[symbol] ), identifier[imported_terms] [ identifier[symbol] ]])
keyword[else] :
keyword[assert] identifier[symbol] . identifier[type] == literal[string]
identifier[rule] = identifier[imported_rules] [ identifier[symbol] ]
keyword[for] identifier[t] keyword[in] identifier[rule] [ literal[int] ]. identifier[iter_subtrees] ():
keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[t] . identifier[children] ):
keyword[if] identifier[isinstance] ( identifier[c] , identifier[Token] ) keyword[and] identifier[c] . identifier[type] keyword[in] ( literal[string] , literal[string] ):
identifier[t] . identifier[children] [ identifier[i] ]= identifier[Token] ( identifier[c] . identifier[type] , identifier[get_namespace_name] ( identifier[c] ))
identifier[rule_defs] . identifier[append] (( identifier[get_namespace_name] ( identifier[symbol] ), identifier[rule] [ literal[int] ], identifier[rule] [ literal[int] ]))
keyword[return] identifier[term_defs] , identifier[rule_defs] | def import_from_grammar_into_namespace(grammar, namespace, aliases):
"""Returns all rules and terminals of grammar, prepended
with a 'namespace' prefix, except for those which are aliased.
"""
imported_terms = dict(grammar.term_defs)
imported_rules = {n: (n, deepcopy(t), o) for (n, t, o) in grammar.rule_defs}
term_defs = []
rule_defs = []
def rule_dependencies(symbol):
if symbol.type != 'RULE':
return [] # depends on [control=['if'], data=[]]
try:
(_, tree, _) = imported_rules[symbol] # depends on [control=['try'], data=[]]
except KeyError:
raise GrammarError("Missing symbol '%s' in grammar %s" % (symbol, namespace)) # depends on [control=['except'], data=[]]
return tree.scan_values(lambda x: x.type in ('RULE', 'TERMINAL'))
def get_namespace_name(name):
try:
return aliases[name].value # depends on [control=['try'], data=[]]
except KeyError:
if name[0] == '_':
return '_%s__%s' % (namespace, name[1:]) # depends on [control=['if'], data=[]]
return '%s__%s' % (namespace, name) # depends on [control=['except'], data=[]]
to_import = list(bfs(aliases, rule_dependencies))
for symbol in to_import:
if symbol.type == 'TERMINAL':
term_defs.append([get_namespace_name(symbol), imported_terms[symbol]]) # depends on [control=['if'], data=[]]
else:
assert symbol.type == 'RULE'
rule = imported_rules[symbol]
for t in rule[1].iter_subtrees():
for (i, c) in enumerate(t.children):
if isinstance(c, Token) and c.type in ('RULE', 'TERMINAL'):
t.children[i] = Token(c.type, get_namespace_name(c)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['t']]
rule_defs.append((get_namespace_name(symbol), rule[1], rule[2])) # depends on [control=['for'], data=['symbol']]
return (term_defs, rule_defs) |
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char) | def function[copy, parameter[self, items]]:
constant[Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
]
return[call[name[NGram], parameter[<ast.IfExp object at 0x7da1b0f43820>, name[self].threshold, name[self].warp, name[self]._key, name[self].N, name[self]._pad_len, name[self]._pad_char]]] | keyword[def] identifier[copy] ( identifier[self] , identifier[items] = keyword[None] ):
literal[string]
keyword[return] identifier[NGram] ( identifier[items] keyword[if] identifier[items] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] ,
identifier[self] . identifier[threshold] , identifier[self] . identifier[warp] , identifier[self] . identifier[_key] ,
identifier[self] . identifier[N] , identifier[self] . identifier[_pad_len] , identifier[self] . identifier[_pad_char] ) | def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self, self.threshold, self.warp, self._key, self.N, self._pad_len, self._pad_char) |
def create_binary_annotation(key, value, annotation_type, host):
"""
Create a zipkin binary annotation object
:param key: name of the annotation, such as 'http.uri'
:param value: value of the annotation, such as a URI
:param annotation_type: type of annotation, such as AnnotationType.I32
:param host: zipkin endpoint object
:returns: zipkin binary annotation object
"""
return zipkin_core.BinaryAnnotation(
key=key,
value=value,
annotation_type=annotation_type,
host=host,
) | def function[create_binary_annotation, parameter[key, value, annotation_type, host]]:
constant[
Create a zipkin binary annotation object
:param key: name of the annotation, such as 'http.uri'
:param value: value of the annotation, such as a URI
:param annotation_type: type of annotation, such as AnnotationType.I32
:param host: zipkin endpoint object
:returns: zipkin binary annotation object
]
return[call[name[zipkin_core].BinaryAnnotation, parameter[]]] | keyword[def] identifier[create_binary_annotation] ( identifier[key] , identifier[value] , identifier[annotation_type] , identifier[host] ):
literal[string]
keyword[return] identifier[zipkin_core] . identifier[BinaryAnnotation] (
identifier[key] = identifier[key] ,
identifier[value] = identifier[value] ,
identifier[annotation_type] = identifier[annotation_type] ,
identifier[host] = identifier[host] ,
) | def create_binary_annotation(key, value, annotation_type, host):
"""
Create a zipkin binary annotation object
:param key: name of the annotation, such as 'http.uri'
:param value: value of the annotation, such as a URI
:param annotation_type: type of annotation, such as AnnotationType.I32
:param host: zipkin endpoint object
:returns: zipkin binary annotation object
"""
return zipkin_core.BinaryAnnotation(key=key, value=value, annotation_type=annotation_type, host=host) |
def sha256file(abspath, nbytes=0, chunk_size=DEFAULT_CHUNK_SIZE):
"""
Return sha256 hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file
:param nbytes: only has first N bytes of the file. if 0 or None,
hash all file
"""
return get_file_fingerprint(abspath, hashlib.sha256, nbytes=nbytes, chunk_size=chunk_size) | def function[sha256file, parameter[abspath, nbytes, chunk_size]]:
constant[
Return sha256 hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file
:param nbytes: only has first N bytes of the file. if 0 or None,
hash all file
]
return[call[name[get_file_fingerprint], parameter[name[abspath], name[hashlib].sha256]]] | keyword[def] identifier[sha256file] ( identifier[abspath] , identifier[nbytes] = literal[int] , identifier[chunk_size] = identifier[DEFAULT_CHUNK_SIZE] ):
literal[string]
keyword[return] identifier[get_file_fingerprint] ( identifier[abspath] , identifier[hashlib] . identifier[sha256] , identifier[nbytes] = identifier[nbytes] , identifier[chunk_size] = identifier[chunk_size] ) | def sha256file(abspath, nbytes=0, chunk_size=DEFAULT_CHUNK_SIZE):
"""
Return sha256 hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file
:param nbytes: only has first N bytes of the file. if 0 or None,
hash all file
"""
return get_file_fingerprint(abspath, hashlib.sha256, nbytes=nbytes, chunk_size=chunk_size) |
def mp_serialize_dict(
bundle: dict,
separator: str = '.',
serialize: t.Optional[t.Callable] = dump_yaml,
value_prefix: str = '::YAML::\n') -> t.List[t.Tuple[str, bytes]]:
"""
Transforms a given ``bundle`` into a *sorted* list of tuples with materialized value paths and values:
``('path.to.value', b'<some>')``. If the ``<some>`` value is not an instance of a basic type, it's serialized
with ``serialize`` callback. If this value is an empty string, it's serialized anyway to enforce correct
type if storage backend does not support saving empty strings.
:param bundle: a dict to materialize
:param separator: build paths with a given separator
:param serialize: a method to serialize non-basic types, default is ``yaml.dump``
:param value_prefix: a prefix for non-basic serialized types
:return: a list of tuples ``(mat_path, b'value')``
::
sample = {
'bool_flag': '', # flag
'unicode': 'вася',
'none_value': None,
'debug': True,
'mixed': ['ascii', 'юникод', 1, {'d': 1}, {'b': 2}],
'nested': {
'a': {
'b': 2,
'c': b'bytes',
}
}
}
result = mp_serialize_dict(sample, separator='/')
assert result == [
('nested/a/b', b'2'),
('nested/a/c', b'bytes'),
('bool_flag', b"::YAML::\\n''\\n"),
('debug', b'true'),
('mixed', b'::YAML::\\n- ascii\\n- '
b'"\\\\u044E\\\\u043D\\\\u0438\\\\u043A\\\\u043E\\\\u0434"\\n- 1\\n- '
b'{d: 1}\\n- {b: 2}\\n'),
('none_value', None),
('unicode', b'\\xd0\\xb2\\xd0\\xb0\\xd1\\x81\\xd1\\x8f')
]
"""
md = materialize_dict(bundle, separator=separator)
res = []
for path, value in md:
# have to serialize values (value should be None or a string / binary data)
if value is None:
pass
elif isinstance(value, str) and value != '':
# check for value != '' used to armor empty string with forced serialization
# since it can be not recognized by a storage backend
pass
elif isinstance(value, bytes):
pass
elif isinstance(value, bool):
value = str(value).lower()
elif isinstance(value, (int, float, Decimal)):
value = str(value)
else:
value = (value_prefix + serialize(value))
if isinstance(value, str):
value = value.encode()
res.append((path, value))
return res | def function[mp_serialize_dict, parameter[bundle, separator, serialize, value_prefix]]:
constant[
Transforms a given ``bundle`` into a *sorted* list of tuples with materialized value paths and values:
``('path.to.value', b'<some>')``. If the ``<some>`` value is not an instance of a basic type, it's serialized
with ``serialize`` callback. If this value is an empty string, it's serialized anyway to enforce correct
type if storage backend does not support saving empty strings.
:param bundle: a dict to materialize
:param separator: build paths with a given separator
:param serialize: a method to serialize non-basic types, default is ``yaml.dump``
:param value_prefix: a prefix for non-basic serialized types
:return: a list of tuples ``(mat_path, b'value')``
::
sample = {
'bool_flag': '', # flag
'unicode': 'вася',
'none_value': None,
'debug': True,
'mixed': ['ascii', 'юникод', 1, {'d': 1}, {'b': 2}],
'nested': {
'a': {
'b': 2,
'c': b'bytes',
}
}
}
result = mp_serialize_dict(sample, separator='/')
assert result == [
('nested/a/b', b'2'),
('nested/a/c', b'bytes'),
('bool_flag', b"::YAML::\n''\n"),
('debug', b'true'),
('mixed', b'::YAML::\n- ascii\n- '
b'"\\u044E\\u043D\\u0438\\u043A\\u043E\\u0434"\n- 1\n- '
b'{d: 1}\n- {b: 2}\n'),
('none_value', None),
('unicode', b'\xd0\xb2\xd0\xb0\xd1\x81\xd1\x8f')
]
]
variable[md] assign[=] call[name[materialize_dict], parameter[name[bundle]]]
variable[res] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b10b0d00>, <ast.Name object at 0x7da1b10b0100>]]] in starred[name[md]] begin[:]
if compare[name[value] is constant[None]] begin[:]
pass
if call[name[isinstance], parameter[name[value], name[str]]] begin[:]
variable[value] assign[=] call[name[value].encode, parameter[]]
call[name[res].append, parameter[tuple[[<ast.Name object at 0x7da1b1039330>, <ast.Name object at 0x7da1b10387f0>]]]]
return[name[res]] | keyword[def] identifier[mp_serialize_dict] (
identifier[bundle] : identifier[dict] ,
identifier[separator] : identifier[str] = literal[string] ,
identifier[serialize] : identifier[t] . identifier[Optional] [ identifier[t] . identifier[Callable] ]= identifier[dump_yaml] ,
identifier[value_prefix] : identifier[str] = literal[string] )-> identifier[t] . identifier[List] [ identifier[t] . identifier[Tuple] [ identifier[str] , identifier[bytes] ]]:
literal[string]
identifier[md] = identifier[materialize_dict] ( identifier[bundle] , identifier[separator] = identifier[separator] )
identifier[res] =[]
keyword[for] identifier[path] , identifier[value] keyword[in] identifier[md] :
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[str] ) keyword[and] identifier[value] != literal[string] :
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[bytes] ):
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[bool] ):
identifier[value] = identifier[str] ( identifier[value] ). identifier[lower] ()
keyword[elif] identifier[isinstance] ( identifier[value] ,( identifier[int] , identifier[float] , identifier[Decimal] )):
identifier[value] = identifier[str] ( identifier[value] )
keyword[else] :
identifier[value] =( identifier[value_prefix] + identifier[serialize] ( identifier[value] ))
keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ):
identifier[value] = identifier[value] . identifier[encode] ()
identifier[res] . identifier[append] (( identifier[path] , identifier[value] ))
keyword[return] identifier[res] | def mp_serialize_dict(bundle: dict, separator: str='.', serialize: t.Optional[t.Callable]=dump_yaml, value_prefix: str='::YAML::\n') -> t.List[t.Tuple[str, bytes]]:
"""
Transforms a given ``bundle`` into a *sorted* list of tuples with materialized value paths and values:
``('path.to.value', b'<some>')``. If the ``<some>`` value is not an instance of a basic type, it's serialized
with ``serialize`` callback. If this value is an empty string, it's serialized anyway to enforce correct
type if storage backend does not support saving empty strings.
:param bundle: a dict to materialize
:param separator: build paths with a given separator
:param serialize: a method to serialize non-basic types, default is ``yaml.dump``
:param value_prefix: a prefix for non-basic serialized types
:return: a list of tuples ``(mat_path, b'value')``
::
sample = {
'bool_flag': '', # flag
'unicode': 'вася',
'none_value': None,
'debug': True,
'mixed': ['ascii', 'юникод', 1, {'d': 1}, {'b': 2}],
'nested': {
'a': {
'b': 2,
'c': b'bytes',
}
}
}
result = mp_serialize_dict(sample, separator='/')
assert result == [
('nested/a/b', b'2'),
('nested/a/c', b'bytes'),
('bool_flag', b"::YAML::\\n''\\n"),
('debug', b'true'),
('mixed', b'::YAML::\\n- ascii\\n- '
b'"\\\\u044E\\\\u043D\\\\u0438\\\\u043A\\\\u043E\\\\u0434"\\n- 1\\n- '
b'{d: 1}\\n- {b: 2}\\n'),
('none_value', None),
('unicode', b'\\xd0\\xb2\\xd0\\xb0\\xd1\\x81\\xd1\\x8f')
]
"""
md = materialize_dict(bundle, separator=separator)
res = []
for (path, value) in md:
# have to serialize values (value should be None or a string / binary data)
if value is None:
pass # depends on [control=['if'], data=[]]
elif isinstance(value, str) and value != '':
# check for value != '' used to armor empty string with forced serialization
# since it can be not recognized by a storage backend
pass # depends on [control=['if'], data=[]]
elif isinstance(value, bytes):
pass # depends on [control=['if'], data=[]]
elif isinstance(value, bool):
value = str(value).lower() # depends on [control=['if'], data=[]]
elif isinstance(value, (int, float, Decimal)):
value = str(value) # depends on [control=['if'], data=[]]
else:
value = value_prefix + serialize(value)
if isinstance(value, str):
value = value.encode() # depends on [control=['if'], data=[]]
res.append((path, value)) # depends on [control=['for'], data=[]]
return res |
def clean_abstract(abstract, remove_tags=['xref', 'ext-link', 'inline-formula', 'mml:*']):
"""
Remove unwanted tags from abstract string,
parsing it as HTML, then only keep the body paragraph contents
"""
if remove_tags:
for tag_name in remove_tags:
abstract = utils.remove_tag(tag_name, abstract)
return abstract | def function[clean_abstract, parameter[abstract, remove_tags]]:
constant[
Remove unwanted tags from abstract string,
parsing it as HTML, then only keep the body paragraph contents
]
if name[remove_tags] begin[:]
for taget[name[tag_name]] in starred[name[remove_tags]] begin[:]
variable[abstract] assign[=] call[name[utils].remove_tag, parameter[name[tag_name], name[abstract]]]
return[name[abstract]] | keyword[def] identifier[clean_abstract] ( identifier[abstract] , identifier[remove_tags] =[ literal[string] , literal[string] , literal[string] , literal[string] ]):
literal[string]
keyword[if] identifier[remove_tags] :
keyword[for] identifier[tag_name] keyword[in] identifier[remove_tags] :
identifier[abstract] = identifier[utils] . identifier[remove_tag] ( identifier[tag_name] , identifier[abstract] )
keyword[return] identifier[abstract] | def clean_abstract(abstract, remove_tags=['xref', 'ext-link', 'inline-formula', 'mml:*']):
"""
Remove unwanted tags from abstract string,
parsing it as HTML, then only keep the body paragraph contents
"""
if remove_tags:
for tag_name in remove_tags:
abstract = utils.remove_tag(tag_name, abstract) # depends on [control=['for'], data=['tag_name']] # depends on [control=['if'], data=[]]
return abstract |
def is_newer_file(a, b):
"""
Check if the file a is newer than file b
"""
if not (op.exists(a) and op.exists(b)):
return False
am = os.stat(a).st_mtime
bm = os.stat(b).st_mtime
return am > bm | def function[is_newer_file, parameter[a, b]]:
constant[
Check if the file a is newer than file b
]
if <ast.UnaryOp object at 0x7da1b09be2f0> begin[:]
return[constant[False]]
variable[am] assign[=] call[name[os].stat, parameter[name[a]]].st_mtime
variable[bm] assign[=] call[name[os].stat, parameter[name[b]]].st_mtime
return[compare[name[am] greater[>] name[bm]]] | keyword[def] identifier[is_newer_file] ( identifier[a] , identifier[b] ):
literal[string]
keyword[if] keyword[not] ( identifier[op] . identifier[exists] ( identifier[a] ) keyword[and] identifier[op] . identifier[exists] ( identifier[b] )):
keyword[return] keyword[False]
identifier[am] = identifier[os] . identifier[stat] ( identifier[a] ). identifier[st_mtime]
identifier[bm] = identifier[os] . identifier[stat] ( identifier[b] ). identifier[st_mtime]
keyword[return] identifier[am] > identifier[bm] | def is_newer_file(a, b):
"""
Check if the file a is newer than file b
"""
if not (op.exists(a) and op.exists(b)):
return False # depends on [control=['if'], data=[]]
am = os.stat(a).st_mtime
bm = os.stat(b).st_mtime
return am > bm |
def _logoutclient(self, useruuid, clientuuid):
"""Log out a client and possibly associated user"""
self.log("Cleaning up client of logged in user.", lvl=debug)
try:
self._users[useruuid].clients.remove(clientuuid)
if len(self._users[useruuid].clients) == 0:
self.log("Last client of user disconnected.", lvl=verbose)
self.fireEvent(userlogout(useruuid, clientuuid))
del self._users[useruuid]
self._clients[clientuuid].useruuid = None
except Exception as e:
self.log("Error during client logout: ", e, type(e),
clientuuid, useruuid, lvl=error,
exc=True) | def function[_logoutclient, parameter[self, useruuid, clientuuid]]:
constant[Log out a client and possibly associated user]
call[name[self].log, parameter[constant[Cleaning up client of logged in user.]]]
<ast.Try object at 0x7da1b0f47e50> | keyword[def] identifier[_logoutclient] ( identifier[self] , identifier[useruuid] , identifier[clientuuid] ):
literal[string]
identifier[self] . identifier[log] ( literal[string] , identifier[lvl] = identifier[debug] )
keyword[try] :
identifier[self] . identifier[_users] [ identifier[useruuid] ]. identifier[clients] . identifier[remove] ( identifier[clientuuid] )
keyword[if] identifier[len] ( identifier[self] . identifier[_users] [ identifier[useruuid] ]. identifier[clients] )== literal[int] :
identifier[self] . identifier[log] ( literal[string] , identifier[lvl] = identifier[verbose] )
identifier[self] . identifier[fireEvent] ( identifier[userlogout] ( identifier[useruuid] , identifier[clientuuid] ))
keyword[del] identifier[self] . identifier[_users] [ identifier[useruuid] ]
identifier[self] . identifier[_clients] [ identifier[clientuuid] ]. identifier[useruuid] = keyword[None]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[log] ( literal[string] , identifier[e] , identifier[type] ( identifier[e] ),
identifier[clientuuid] , identifier[useruuid] , identifier[lvl] = identifier[error] ,
identifier[exc] = keyword[True] ) | def _logoutclient(self, useruuid, clientuuid):
"""Log out a client and possibly associated user"""
self.log('Cleaning up client of logged in user.', lvl=debug)
try:
self._users[useruuid].clients.remove(clientuuid)
if len(self._users[useruuid].clients) == 0:
self.log('Last client of user disconnected.', lvl=verbose)
self.fireEvent(userlogout(useruuid, clientuuid))
del self._users[useruuid] # depends on [control=['if'], data=[]]
self._clients[clientuuid].useruuid = None # depends on [control=['try'], data=[]]
except Exception as e:
self.log('Error during client logout: ', e, type(e), clientuuid, useruuid, lvl=error, exc=True) # depends on [control=['except'], data=['e']] |
def continuous_periods(self):
"""
Return a list of continuous data periods by removing the data gaps from the overall record.
"""
result = []
# For the first period
start_date = self.start_date
for gap in self.pot_data_gaps:
end_date = gap.start_date - timedelta(days=1)
result.append(PotPeriod(start_date, end_date))
# For the next period
start_date = gap.end_date + timedelta(days=1)
# For the last period
end_date = self.end_date
result.append(PotPeriod(start_date, end_date))
return result | def function[continuous_periods, parameter[self]]:
constant[
Return a list of continuous data periods by removing the data gaps from the overall record.
]
variable[result] assign[=] list[[]]
variable[start_date] assign[=] name[self].start_date
for taget[name[gap]] in starred[name[self].pot_data_gaps] begin[:]
variable[end_date] assign[=] binary_operation[name[gap].start_date - call[name[timedelta], parameter[]]]
call[name[result].append, parameter[call[name[PotPeriod], parameter[name[start_date], name[end_date]]]]]
variable[start_date] assign[=] binary_operation[name[gap].end_date + call[name[timedelta], parameter[]]]
variable[end_date] assign[=] name[self].end_date
call[name[result].append, parameter[call[name[PotPeriod], parameter[name[start_date], name[end_date]]]]]
return[name[result]] | keyword[def] identifier[continuous_periods] ( identifier[self] ):
literal[string]
identifier[result] =[]
identifier[start_date] = identifier[self] . identifier[start_date]
keyword[for] identifier[gap] keyword[in] identifier[self] . identifier[pot_data_gaps] :
identifier[end_date] = identifier[gap] . identifier[start_date] - identifier[timedelta] ( identifier[days] = literal[int] )
identifier[result] . identifier[append] ( identifier[PotPeriod] ( identifier[start_date] , identifier[end_date] ))
identifier[start_date] = identifier[gap] . identifier[end_date] + identifier[timedelta] ( identifier[days] = literal[int] )
identifier[end_date] = identifier[self] . identifier[end_date]
identifier[result] . identifier[append] ( identifier[PotPeriod] ( identifier[start_date] , identifier[end_date] ))
keyword[return] identifier[result] | def continuous_periods(self):
"""
Return a list of continuous data periods by removing the data gaps from the overall record.
"""
result = []
# For the first period
start_date = self.start_date
for gap in self.pot_data_gaps:
end_date = gap.start_date - timedelta(days=1)
result.append(PotPeriod(start_date, end_date))
# For the next period
start_date = gap.end_date + timedelta(days=1) # depends on [control=['for'], data=['gap']]
# For the last period
end_date = self.end_date
result.append(PotPeriod(start_date, end_date))
return result |
def me(self, include=None):
"""
Return the logged in user
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading#abilities>`__.
"""
return self._query_zendesk(self.endpoint.me, 'user', include=include) | def function[me, parameter[self, include]]:
constant[
Return the logged in user
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading#abilities>`__.
]
return[call[name[self]._query_zendesk, parameter[name[self].endpoint.me, constant[user]]]] | keyword[def] identifier[me] ( identifier[self] , identifier[include] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_query_zendesk] ( identifier[self] . identifier[endpoint] . identifier[me] , literal[string] , identifier[include] = identifier[include] ) | def me(self, include=None):
"""
Return the logged in user
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading#abilities>`__.
"""
return self._query_zendesk(self.endpoint.me, 'user', include=include) |
def Modify(self, user_type=None, password=None):
"""Modifies user's type and/or password."""
args = user_management_pb2.ApiModifyGrrUserArgs(
username=self.username, user_type=user_type)
if user_type is not None:
args.user_type = user_type
if password is not None:
args.password = password
data = self._context.SendRequest("ModifyGrrUser", args)
return GrrUser(data=data, context=self._context) | def function[Modify, parameter[self, user_type, password]]:
constant[Modifies user's type and/or password.]
variable[args] assign[=] call[name[user_management_pb2].ApiModifyGrrUserArgs, parameter[]]
if compare[name[user_type] is_not constant[None]] begin[:]
name[args].user_type assign[=] name[user_type]
if compare[name[password] is_not constant[None]] begin[:]
name[args].password assign[=] name[password]
variable[data] assign[=] call[name[self]._context.SendRequest, parameter[constant[ModifyGrrUser], name[args]]]
return[call[name[GrrUser], parameter[]]] | keyword[def] identifier[Modify] ( identifier[self] , identifier[user_type] = keyword[None] , identifier[password] = keyword[None] ):
literal[string]
identifier[args] = identifier[user_management_pb2] . identifier[ApiModifyGrrUserArgs] (
identifier[username] = identifier[self] . identifier[username] , identifier[user_type] = identifier[user_type] )
keyword[if] identifier[user_type] keyword[is] keyword[not] keyword[None] :
identifier[args] . identifier[user_type] = identifier[user_type]
keyword[if] identifier[password] keyword[is] keyword[not] keyword[None] :
identifier[args] . identifier[password] = identifier[password]
identifier[data] = identifier[self] . identifier[_context] . identifier[SendRequest] ( literal[string] , identifier[args] )
keyword[return] identifier[GrrUser] ( identifier[data] = identifier[data] , identifier[context] = identifier[self] . identifier[_context] ) | def Modify(self, user_type=None, password=None):
"""Modifies user's type and/or password."""
args = user_management_pb2.ApiModifyGrrUserArgs(username=self.username, user_type=user_type)
if user_type is not None:
args.user_type = user_type # depends on [control=['if'], data=['user_type']]
if password is not None:
args.password = password # depends on [control=['if'], data=['password']]
data = self._context.SendRequest('ModifyGrrUser', args)
return GrrUser(data=data, context=self._context) |
def _run_bookkeeping_sql_scripts(
self):
"""*run bookkeeping sql scripts*
"""
self.log.info('starting the ``_run_bookkeeping_sql_scripts`` method')
moduleDirectory = os.path.dirname(__file__)
mysqlScripts = moduleDirectory + "/mysql"
directory_script_runner(
log=self.log,
pathToScriptDirectory=mysqlScripts,
databaseName=self.settings[
"database settings"]["atlasMovers"]["db"],
force=True,
loginPath=self.settings["database settings"][
"atlasMovers"]["loginPath"],
waitForResult=True,
successRule=False,
failureRule=False
)
self.log.info('completed the ``_run_bookkeeping_sql_scripts`` method')
return None | def function[_run_bookkeeping_sql_scripts, parameter[self]]:
constant[*run bookkeeping sql scripts*
]
call[name[self].log.info, parameter[constant[starting the ``_run_bookkeeping_sql_scripts`` method]]]
variable[moduleDirectory] assign[=] call[name[os].path.dirname, parameter[name[__file__]]]
variable[mysqlScripts] assign[=] binary_operation[name[moduleDirectory] + constant[/mysql]]
call[name[directory_script_runner], parameter[]]
call[name[self].log.info, parameter[constant[completed the ``_run_bookkeeping_sql_scripts`` method]]]
return[constant[None]] | keyword[def] identifier[_run_bookkeeping_sql_scripts] (
identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
identifier[moduleDirectory] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )
identifier[mysqlScripts] = identifier[moduleDirectory] + literal[string]
identifier[directory_script_runner] (
identifier[log] = identifier[self] . identifier[log] ,
identifier[pathToScriptDirectory] = identifier[mysqlScripts] ,
identifier[databaseName] = identifier[self] . identifier[settings] [
literal[string] ][ literal[string] ][ literal[string] ],
identifier[force] = keyword[True] ,
identifier[loginPath] = identifier[self] . identifier[settings] [ literal[string] ][
literal[string] ][ literal[string] ],
identifier[waitForResult] = keyword[True] ,
identifier[successRule] = keyword[False] ,
identifier[failureRule] = keyword[False]
)
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[return] keyword[None] | def _run_bookkeeping_sql_scripts(self):
"""*run bookkeeping sql scripts*
"""
self.log.info('starting the ``_run_bookkeeping_sql_scripts`` method')
moduleDirectory = os.path.dirname(__file__)
mysqlScripts = moduleDirectory + '/mysql'
directory_script_runner(log=self.log, pathToScriptDirectory=mysqlScripts, databaseName=self.settings['database settings']['atlasMovers']['db'], force=True, loginPath=self.settings['database settings']['atlasMovers']['loginPath'], waitForResult=True, successRule=False, failureRule=False)
self.log.info('completed the ``_run_bookkeeping_sql_scripts`` method')
return None |
def get_credentials(scopes=None, secrets=None, storage=None, no_webserver=False):
"""Make OAuth 2.0 credentials for scopes from ``secrets`` and ``storage`` files.
Args:
scopes: scope URL(s) or ``'read'``, ``'write'`` (default: ``%r``)
secrets: location of secrets file (default: ``%r``)
storage: location of storage file (default: ``%r``)
no_webserver: url/code prompt instead of webbrowser based auth
see https://developers.google.com/sheets/quickstart/python
see https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
"""
scopes = Scopes.get(scopes)
if secrets is None:
secrets = SECRETS
if storage is None:
storage = STORAGE
secrets, storage = map(os.path.expanduser, (secrets, storage))
store = file.Storage(storage)
creds = store.get()
if creds is None or creds.invalid:
flow = client.flow_from_clientsecrets(secrets, scopes)
args = ['--noauth_local_webserver'] if no_webserver else []
flags = tools.argparser.parse_args(args)
creds = tools.run_flow(flow, store, flags)
return creds | def function[get_credentials, parameter[scopes, secrets, storage, no_webserver]]:
constant[Make OAuth 2.0 credentials for scopes from ``secrets`` and ``storage`` files.
Args:
scopes: scope URL(s) or ``'read'``, ``'write'`` (default: ``%r``)
secrets: location of secrets file (default: ``%r``)
storage: location of storage file (default: ``%r``)
no_webserver: url/code prompt instead of webbrowser based auth
see https://developers.google.com/sheets/quickstart/python
see https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
]
variable[scopes] assign[=] call[name[Scopes].get, parameter[name[scopes]]]
if compare[name[secrets] is constant[None]] begin[:]
variable[secrets] assign[=] name[SECRETS]
if compare[name[storage] is constant[None]] begin[:]
variable[storage] assign[=] name[STORAGE]
<ast.Tuple object at 0x7da1b1c61510> assign[=] call[name[map], parameter[name[os].path.expanduser, tuple[[<ast.Name object at 0x7da1b1c63130>, <ast.Name object at 0x7da1b1c60dc0>]]]]
variable[store] assign[=] call[name[file].Storage, parameter[name[storage]]]
variable[creds] assign[=] call[name[store].get, parameter[]]
if <ast.BoolOp object at 0x7da1b1b171c0> begin[:]
variable[flow] assign[=] call[name[client].flow_from_clientsecrets, parameter[name[secrets], name[scopes]]]
variable[args] assign[=] <ast.IfExp object at 0x7da1b1b17ee0>
variable[flags] assign[=] call[name[tools].argparser.parse_args, parameter[name[args]]]
variable[creds] assign[=] call[name[tools].run_flow, parameter[name[flow], name[store], name[flags]]]
return[name[creds]] | keyword[def] identifier[get_credentials] ( identifier[scopes] = keyword[None] , identifier[secrets] = keyword[None] , identifier[storage] = keyword[None] , identifier[no_webserver] = keyword[False] ):
literal[string]
identifier[scopes] = identifier[Scopes] . identifier[get] ( identifier[scopes] )
keyword[if] identifier[secrets] keyword[is] keyword[None] :
identifier[secrets] = identifier[SECRETS]
keyword[if] identifier[storage] keyword[is] keyword[None] :
identifier[storage] = identifier[STORAGE]
identifier[secrets] , identifier[storage] = identifier[map] ( identifier[os] . identifier[path] . identifier[expanduser] ,( identifier[secrets] , identifier[storage] ))
identifier[store] = identifier[file] . identifier[Storage] ( identifier[storage] )
identifier[creds] = identifier[store] . identifier[get] ()
keyword[if] identifier[creds] keyword[is] keyword[None] keyword[or] identifier[creds] . identifier[invalid] :
identifier[flow] = identifier[client] . identifier[flow_from_clientsecrets] ( identifier[secrets] , identifier[scopes] )
identifier[args] =[ literal[string] ] keyword[if] identifier[no_webserver] keyword[else] []
identifier[flags] = identifier[tools] . identifier[argparser] . identifier[parse_args] ( identifier[args] )
identifier[creds] = identifier[tools] . identifier[run_flow] ( identifier[flow] , identifier[store] , identifier[flags] )
keyword[return] identifier[creds] | def get_credentials(scopes=None, secrets=None, storage=None, no_webserver=False):
"""Make OAuth 2.0 credentials for scopes from ``secrets`` and ``storage`` files.
Args:
scopes: scope URL(s) or ``'read'``, ``'write'`` (default: ``%r``)
secrets: location of secrets file (default: ``%r``)
storage: location of storage file (default: ``%r``)
no_webserver: url/code prompt instead of webbrowser based auth
see https://developers.google.com/sheets/quickstart/python
see https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
"""
scopes = Scopes.get(scopes)
if secrets is None:
secrets = SECRETS # depends on [control=['if'], data=['secrets']]
if storage is None:
storage = STORAGE # depends on [control=['if'], data=['storage']]
(secrets, storage) = map(os.path.expanduser, (secrets, storage))
store = file.Storage(storage)
creds = store.get()
if creds is None or creds.invalid:
flow = client.flow_from_clientsecrets(secrets, scopes)
args = ['--noauth_local_webserver'] if no_webserver else []
flags = tools.argparser.parse_args(args)
creds = tools.run_flow(flow, store, flags) # depends on [control=['if'], data=[]]
return creds |
def create(self, output_path, dry_run=False, output_format=None, compresslevel=None):
"""
Create the archive at output_file_path.
Type of the archive is determined either by extension of output_file_path or by output_format.
Supported formats are: gz, zip, bz2, xz, tar, tgz, txz
@param output_path: Output file path.
@type output_path: str
@param dry_run: Determines whether create should do nothing but print what it would archive.
@type dry_run: bool
@param output_format: Determines format of the output archive. If None, format is determined from extension
of output_file_path.
@type output_format: str
"""
if output_format is None:
file_name, file_ext = path.splitext(output_path)
output_format = file_ext[len(extsep):].lower()
self.LOG.debug("Output format is not explicitly set, determined format is {0}.".format(output_format))
if not dry_run:
if output_format in self.ZIPFILE_FORMATS:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
if compresslevel is not None:
if sys.version_info > (3, 7):
archive = ZipFile(path.abspath(output_path), 'w', compresslevel=compresslevel)
else:
raise ValueError("Compression level for zip archives requires Python 3.7+")
else:
archive = ZipFile(path.abspath(output_path), 'w')
def add_file(file_path, arcname):
if not path.islink(file_path):
archive.write(file_path, arcname, ZIP_DEFLATED)
else:
i = ZipInfo(arcname)
i.create_system = 3
i.external_attr = 0xA1ED0000
archive.writestr(i, readlink(file_path))
elif output_format in self.TARFILE_FORMATS:
import tarfile
mode = self.TARFILE_FORMATS[output_format]
if compresslevel is not None:
try:
archive = tarfile.open(path.abspath(output_path), mode, compresslevel=compresslevel)
except TypeError:
raise ValueError("{0} cannot be compressed".format(output_format))
else:
archive = tarfile.open(path.abspath(output_path), mode)
def add_file(file_path, arcname):
archive.add(file_path, arcname)
else:
raise ValueError("unknown format: {0}".format(output_format))
def archiver(file_path, arcname):
self.LOG.debug("{0} => {1}".format(file_path, arcname))
add_file(file_path, arcname)
else:
archive = None
def archiver(file_path, arcname):
self.LOG.info("{0} => {1}".format(file_path, arcname))
self.archive_all_files(archiver)
if archive is not None:
archive.close() | def function[create, parameter[self, output_path, dry_run, output_format, compresslevel]]:
constant[
Create the archive at output_file_path.
Type of the archive is determined either by extension of output_file_path or by output_format.
Supported formats are: gz, zip, bz2, xz, tar, tgz, txz
@param output_path: Output file path.
@type output_path: str
@param dry_run: Determines whether create should do nothing but print what it would archive.
@type dry_run: bool
@param output_format: Determines format of the output archive. If None, format is determined from extension
of output_file_path.
@type output_format: str
]
if compare[name[output_format] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b07a5a80> assign[=] call[name[path].splitext, parameter[name[output_path]]]
variable[output_format] assign[=] call[call[name[file_ext]][<ast.Slice object at 0x7da1b07a6e90>].lower, parameter[]]
call[name[self].LOG.debug, parameter[call[constant[Output format is not explicitly set, determined format is {0}.].format, parameter[name[output_format]]]]]
if <ast.UnaryOp object at 0x7da1b07b64d0> begin[:]
if compare[name[output_format] in name[self].ZIPFILE_FORMATS] begin[:]
from relative_module[zipfile] import module[ZipFile], module[ZipInfo], module[ZIP_DEFLATED]
if compare[name[compresslevel] is_not constant[None]] begin[:]
if compare[name[sys].version_info greater[>] tuple[[<ast.Constant object at 0x7da1b07b66b0>, <ast.Constant object at 0x7da1b07b5720>]]] begin[:]
variable[archive] assign[=] call[name[ZipFile], parameter[call[name[path].abspath, parameter[name[output_path]]], constant[w]]]
def function[add_file, parameter[file_path, arcname]]:
if <ast.UnaryOp object at 0x7da1b07a5e70> begin[:]
call[name[archive].write, parameter[name[file_path], name[arcname], name[ZIP_DEFLATED]]]
def function[archiver, parameter[file_path, arcname]]:
call[name[self].LOG.debug, parameter[call[constant[{0} => {1}].format, parameter[name[file_path], name[arcname]]]]]
call[name[add_file], parameter[name[file_path], name[arcname]]]
call[name[self].archive_all_files, parameter[name[archiver]]]
if compare[name[archive] is_not constant[None]] begin[:]
call[name[archive].close, parameter[]] | keyword[def] identifier[create] ( identifier[self] , identifier[output_path] , identifier[dry_run] = keyword[False] , identifier[output_format] = keyword[None] , identifier[compresslevel] = keyword[None] ):
literal[string]
keyword[if] identifier[output_format] keyword[is] keyword[None] :
identifier[file_name] , identifier[file_ext] = identifier[path] . identifier[splitext] ( identifier[output_path] )
identifier[output_format] = identifier[file_ext] [ identifier[len] ( identifier[extsep] ):]. identifier[lower] ()
identifier[self] . identifier[LOG] . identifier[debug] ( literal[string] . identifier[format] ( identifier[output_format] ))
keyword[if] keyword[not] identifier[dry_run] :
keyword[if] identifier[output_format] keyword[in] identifier[self] . identifier[ZIPFILE_FORMATS] :
keyword[from] identifier[zipfile] keyword[import] identifier[ZipFile] , identifier[ZipInfo] , identifier[ZIP_DEFLATED]
keyword[if] identifier[compresslevel] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[sys] . identifier[version_info] >( literal[int] , literal[int] ):
identifier[archive] = identifier[ZipFile] ( identifier[path] . identifier[abspath] ( identifier[output_path] ), literal[string] , identifier[compresslevel] = identifier[compresslevel] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[archive] = identifier[ZipFile] ( identifier[path] . identifier[abspath] ( identifier[output_path] ), literal[string] )
keyword[def] identifier[add_file] ( identifier[file_path] , identifier[arcname] ):
keyword[if] keyword[not] identifier[path] . identifier[islink] ( identifier[file_path] ):
identifier[archive] . identifier[write] ( identifier[file_path] , identifier[arcname] , identifier[ZIP_DEFLATED] )
keyword[else] :
identifier[i] = identifier[ZipInfo] ( identifier[arcname] )
identifier[i] . identifier[create_system] = literal[int]
identifier[i] . identifier[external_attr] = literal[int]
identifier[archive] . identifier[writestr] ( identifier[i] , identifier[readlink] ( identifier[file_path] ))
keyword[elif] identifier[output_format] keyword[in] identifier[self] . identifier[TARFILE_FORMATS] :
keyword[import] identifier[tarfile]
identifier[mode] = identifier[self] . identifier[TARFILE_FORMATS] [ identifier[output_format] ]
keyword[if] identifier[compresslevel] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[archive] = identifier[tarfile] . identifier[open] ( identifier[path] . identifier[abspath] ( identifier[output_path] ), identifier[mode] , identifier[compresslevel] = identifier[compresslevel] )
keyword[except] identifier[TypeError] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[output_format] ))
keyword[else] :
identifier[archive] = identifier[tarfile] . identifier[open] ( identifier[path] . identifier[abspath] ( identifier[output_path] ), identifier[mode] )
keyword[def] identifier[add_file] ( identifier[file_path] , identifier[arcname] ):
identifier[archive] . identifier[add] ( identifier[file_path] , identifier[arcname] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[output_format] ))
keyword[def] identifier[archiver] ( identifier[file_path] , identifier[arcname] ):
identifier[self] . identifier[LOG] . identifier[debug] ( literal[string] . identifier[format] ( identifier[file_path] , identifier[arcname] ))
identifier[add_file] ( identifier[file_path] , identifier[arcname] )
keyword[else] :
identifier[archive] = keyword[None]
keyword[def] identifier[archiver] ( identifier[file_path] , identifier[arcname] ):
identifier[self] . identifier[LOG] . identifier[info] ( literal[string] . identifier[format] ( identifier[file_path] , identifier[arcname] ))
identifier[self] . identifier[archive_all_files] ( identifier[archiver] )
keyword[if] identifier[archive] keyword[is] keyword[not] keyword[None] :
identifier[archive] . identifier[close] () | def create(self, output_path, dry_run=False, output_format=None, compresslevel=None):
"""
Create the archive at output_file_path.
Type of the archive is determined either by extension of output_file_path or by output_format.
Supported formats are: gz, zip, bz2, xz, tar, tgz, txz
@param output_path: Output file path.
@type output_path: str
@param dry_run: Determines whether create should do nothing but print what it would archive.
@type dry_run: bool
@param output_format: Determines format of the output archive. If None, format is determined from extension
of output_file_path.
@type output_format: str
"""
if output_format is None:
(file_name, file_ext) = path.splitext(output_path)
output_format = file_ext[len(extsep):].lower()
self.LOG.debug('Output format is not explicitly set, determined format is {0}.'.format(output_format)) # depends on [control=['if'], data=['output_format']]
if not dry_run:
if output_format in self.ZIPFILE_FORMATS:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
if compresslevel is not None:
if sys.version_info > (3, 7):
archive = ZipFile(path.abspath(output_path), 'w', compresslevel=compresslevel) # depends on [control=['if'], data=[]]
else:
raise ValueError('Compression level for zip archives requires Python 3.7+') # depends on [control=['if'], data=['compresslevel']]
else:
archive = ZipFile(path.abspath(output_path), 'w')
def add_file(file_path, arcname):
if not path.islink(file_path):
archive.write(file_path, arcname, ZIP_DEFLATED) # depends on [control=['if'], data=[]]
else:
i = ZipInfo(arcname)
i.create_system = 3
i.external_attr = 2716663808
archive.writestr(i, readlink(file_path)) # depends on [control=['if'], data=[]]
elif output_format in self.TARFILE_FORMATS:
import tarfile
mode = self.TARFILE_FORMATS[output_format]
if compresslevel is not None:
try:
archive = tarfile.open(path.abspath(output_path), mode, compresslevel=compresslevel) # depends on [control=['try'], data=[]]
except TypeError:
raise ValueError('{0} cannot be compressed'.format(output_format)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['compresslevel']]
else:
archive = tarfile.open(path.abspath(output_path), mode)
def add_file(file_path, arcname):
archive.add(file_path, arcname) # depends on [control=['if'], data=['output_format']]
else:
raise ValueError('unknown format: {0}'.format(output_format))
def archiver(file_path, arcname):
self.LOG.debug('{0} => {1}'.format(file_path, arcname))
add_file(file_path, arcname) # depends on [control=['if'], data=[]]
else:
archive = None
def archiver(file_path, arcname):
self.LOG.info('{0} => {1}'.format(file_path, arcname))
self.archive_all_files(archiver)
if archive is not None:
archive.close() # depends on [control=['if'], data=['archive']] |
def create_guest(self, capacity_id, test, guest_object):
"""Turns an empty Reserve Capacity into a real Virtual Guest
:param int capacity_id: ID of the RESERVED_CAPACITY_GROUP to create this guest into
:param bool test: True will use verifyOrder, False will use placeOrder
:param dictionary guest_object: Below is the minimum info you need to send in
guest_object = {
'domain': 'test.com',
'hostname': 'A1538172419',
'os_code': 'UBUNTU_LATEST_64',
'primary_disk': '25',
}
"""
vs_manager = VSManager(self.client)
mask = "mask[instances[id, billingItem[id, item[id,keyName]]], backendRouter[id, datacenter[name]]]"
capacity = self.get_object(capacity_id, mask=mask)
try:
capacity_flavor = capacity['instances'][0]['billingItem']['item']['keyName']
flavor = _flavor_string(capacity_flavor, guest_object['primary_disk'])
except KeyError:
raise SoftLayer.SoftLayerError("Unable to find capacity Flavor.")
guest_object['flavor'] = flavor
guest_object['datacenter'] = capacity['backendRouter']['datacenter']['name']
# Reserved capacity only supports SAN as of 20181008
guest_object['local_disk'] = False
template = vs_manager.verify_create_instance(**guest_object)
template['reservedCapacityId'] = capacity_id
if guest_object.get('ipv6'):
ipv6_price = self.ordering_manager.get_price_id_list('PUBLIC_CLOUD_SERVER', ['1_IPV6_ADDRESS'])
template['prices'].append({'id': ipv6_price[0]})
if test:
result = self.client.call('Product_Order', 'verifyOrder', template)
else:
result = self.client.call('Product_Order', 'placeOrder', template)
return result | def function[create_guest, parameter[self, capacity_id, test, guest_object]]:
constant[Turns an empty Reserve Capacity into a real Virtual Guest
:param int capacity_id: ID of the RESERVED_CAPACITY_GROUP to create this guest into
:param bool test: True will use verifyOrder, False will use placeOrder
:param dictionary guest_object: Below is the minimum info you need to send in
guest_object = {
'domain': 'test.com',
'hostname': 'A1538172419',
'os_code': 'UBUNTU_LATEST_64',
'primary_disk': '25',
}
]
variable[vs_manager] assign[=] call[name[VSManager], parameter[name[self].client]]
variable[mask] assign[=] constant[mask[instances[id, billingItem[id, item[id,keyName]]], backendRouter[id, datacenter[name]]]]
variable[capacity] assign[=] call[name[self].get_object, parameter[name[capacity_id]]]
<ast.Try object at 0x7da20e9b01f0>
call[name[guest_object]][constant[flavor]] assign[=] name[flavor]
call[name[guest_object]][constant[datacenter]] assign[=] call[call[call[name[capacity]][constant[backendRouter]]][constant[datacenter]]][constant[name]]
call[name[guest_object]][constant[local_disk]] assign[=] constant[False]
variable[template] assign[=] call[name[vs_manager].verify_create_instance, parameter[]]
call[name[template]][constant[reservedCapacityId]] assign[=] name[capacity_id]
if call[name[guest_object].get, parameter[constant[ipv6]]] begin[:]
variable[ipv6_price] assign[=] call[name[self].ordering_manager.get_price_id_list, parameter[constant[PUBLIC_CLOUD_SERVER], list[[<ast.Constant object at 0x7da20e9b2c50>]]]]
call[call[name[template]][constant[prices]].append, parameter[dictionary[[<ast.Constant object at 0x7da20e9b2ec0>], [<ast.Subscript object at 0x7da20e9b3550>]]]]
if name[test] begin[:]
variable[result] assign[=] call[name[self].client.call, parameter[constant[Product_Order], constant[verifyOrder], name[template]]]
return[name[result]] | keyword[def] identifier[create_guest] ( identifier[self] , identifier[capacity_id] , identifier[test] , identifier[guest_object] ):
literal[string]
identifier[vs_manager] = identifier[VSManager] ( identifier[self] . identifier[client] )
identifier[mask] = literal[string]
identifier[capacity] = identifier[self] . identifier[get_object] ( identifier[capacity_id] , identifier[mask] = identifier[mask] )
keyword[try] :
identifier[capacity_flavor] = identifier[capacity] [ literal[string] ][ literal[int] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[flavor] = identifier[_flavor_string] ( identifier[capacity_flavor] , identifier[guest_object] [ literal[string] ])
keyword[except] identifier[KeyError] :
keyword[raise] identifier[SoftLayer] . identifier[SoftLayerError] ( literal[string] )
identifier[guest_object] [ literal[string] ]= identifier[flavor]
identifier[guest_object] [ literal[string] ]= identifier[capacity] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[guest_object] [ literal[string] ]= keyword[False]
identifier[template] = identifier[vs_manager] . identifier[verify_create_instance] (** identifier[guest_object] )
identifier[template] [ literal[string] ]= identifier[capacity_id]
keyword[if] identifier[guest_object] . identifier[get] ( literal[string] ):
identifier[ipv6_price] = identifier[self] . identifier[ordering_manager] . identifier[get_price_id_list] ( literal[string] ,[ literal[string] ])
identifier[template] [ literal[string] ]. identifier[append] ({ literal[string] : identifier[ipv6_price] [ literal[int] ]})
keyword[if] identifier[test] :
identifier[result] = identifier[self] . identifier[client] . identifier[call] ( literal[string] , literal[string] , identifier[template] )
keyword[else] :
identifier[result] = identifier[self] . identifier[client] . identifier[call] ( literal[string] , literal[string] , identifier[template] )
keyword[return] identifier[result] | def create_guest(self, capacity_id, test, guest_object):
"""Turns an empty Reserve Capacity into a real Virtual Guest
:param int capacity_id: ID of the RESERVED_CAPACITY_GROUP to create this guest into
:param bool test: True will use verifyOrder, False will use placeOrder
:param dictionary guest_object: Below is the minimum info you need to send in
guest_object = {
'domain': 'test.com',
'hostname': 'A1538172419',
'os_code': 'UBUNTU_LATEST_64',
'primary_disk': '25',
}
"""
vs_manager = VSManager(self.client)
mask = 'mask[instances[id, billingItem[id, item[id,keyName]]], backendRouter[id, datacenter[name]]]'
capacity = self.get_object(capacity_id, mask=mask)
try:
capacity_flavor = capacity['instances'][0]['billingItem']['item']['keyName']
flavor = _flavor_string(capacity_flavor, guest_object['primary_disk']) # depends on [control=['try'], data=[]]
except KeyError:
raise SoftLayer.SoftLayerError('Unable to find capacity Flavor.') # depends on [control=['except'], data=[]]
guest_object['flavor'] = flavor
guest_object['datacenter'] = capacity['backendRouter']['datacenter']['name']
# Reserved capacity only supports SAN as of 20181008
guest_object['local_disk'] = False
template = vs_manager.verify_create_instance(**guest_object)
template['reservedCapacityId'] = capacity_id
if guest_object.get('ipv6'):
ipv6_price = self.ordering_manager.get_price_id_list('PUBLIC_CLOUD_SERVER', ['1_IPV6_ADDRESS'])
template['prices'].append({'id': ipv6_price[0]}) # depends on [control=['if'], data=[]]
if test:
result = self.client.call('Product_Order', 'verifyOrder', template) # depends on [control=['if'], data=[]]
else:
result = self.client.call('Product_Order', 'placeOrder', template)
return result |
def xray_requests_send(wrapped, instance, args, kwargs):
"""Wrapper around the requests library's low-level send method."""
return generic_xray_wrapper(
wrapped, instance, args, kwargs,
name='requests',
namespace='remote',
metadata_extractor=extract_http_metadata,
) | def function[xray_requests_send, parameter[wrapped, instance, args, kwargs]]:
constant[Wrapper around the requests library's low-level send method.]
return[call[name[generic_xray_wrapper], parameter[name[wrapped], name[instance], name[args], name[kwargs]]]] | keyword[def] identifier[xray_requests_send] ( identifier[wrapped] , identifier[instance] , identifier[args] , identifier[kwargs] ):
literal[string]
keyword[return] identifier[generic_xray_wrapper] (
identifier[wrapped] , identifier[instance] , identifier[args] , identifier[kwargs] ,
identifier[name] = literal[string] ,
identifier[namespace] = literal[string] ,
identifier[metadata_extractor] = identifier[extract_http_metadata] ,
) | def xray_requests_send(wrapped, instance, args, kwargs):
"""Wrapper around the requests library's low-level send method."""
return generic_xray_wrapper(wrapped, instance, args, kwargs, name='requests', namespace='remote', metadata_extractor=extract_http_metadata) |
def get_proficiencies_on_date(self, from_, to):
"""Gets a ``ProficiencyList`` effecyive during the entire given date range inclusive but not confined to the date range.
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.learning.ProficiencyList) - the returned
``Proficiency`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``from`` or ``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_on_date
proficiency_list = []
for proficiency in self.get_proficiencies():
if overlap(from_, to, proficiency.start_date, proficiency.end_date):
proficiency_list.append(proficiency)
return objects.ProficiencyList(proficiency_list, runtime=self._runtime) | def function[get_proficiencies_on_date, parameter[self, from_, to]]:
constant[Gets a ``ProficiencyList`` effecyive during the entire given date range inclusive but not confined to the date range.
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.learning.ProficiencyList) - the returned
``Proficiency`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``from`` or ``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
variable[proficiency_list] assign[=] list[[]]
for taget[name[proficiency]] in starred[call[name[self].get_proficiencies, parameter[]]] begin[:]
if call[name[overlap], parameter[name[from_], name[to], name[proficiency].start_date, name[proficiency].end_date]] begin[:]
call[name[proficiency_list].append, parameter[name[proficiency]]]
return[call[name[objects].ProficiencyList, parameter[name[proficiency_list]]]] | keyword[def] identifier[get_proficiencies_on_date] ( identifier[self] , identifier[from_] , identifier[to] ):
literal[string]
identifier[proficiency_list] =[]
keyword[for] identifier[proficiency] keyword[in] identifier[self] . identifier[get_proficiencies] ():
keyword[if] identifier[overlap] ( identifier[from_] , identifier[to] , identifier[proficiency] . identifier[start_date] , identifier[proficiency] . identifier[end_date] ):
identifier[proficiency_list] . identifier[append] ( identifier[proficiency] )
keyword[return] identifier[objects] . identifier[ProficiencyList] ( identifier[proficiency_list] , identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_proficiencies_on_date(self, from_, to):
"""Gets a ``ProficiencyList`` effecyive during the entire given date range inclusive but not confined to the date range.
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.learning.ProficiencyList) - the returned
``Proficiency`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``from`` or ``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_on_date
proficiency_list = []
for proficiency in self.get_proficiencies():
if overlap(from_, to, proficiency.start_date, proficiency.end_date):
proficiency_list.append(proficiency) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['proficiency']]
return objects.ProficiencyList(proficiency_list, runtime=self._runtime) |
def set(self, key, val, env=None):
"""
Changes the value for the setting specified by 'key' to the new value.
By default this will change the current environment, but you can change
values in other environments by passing the name of that environment as
the 'env' parameter.
"""
if env is None:
env = self.environment
else:
if env not in self._settings:
raise exc.EnvironmentNotFound("There is no environment named "
"'%s'." % env)
dct = self._settings[env]
if key not in dct:
raise exc.InvalidSetting("The setting '%s' is not defined." % key)
dct[key] = val
if key == "identity_type":
# If setting the identity_type, also change the identity_class.
dct["identity_class"] = _import_identity(val)
elif key == "region":
if not identity:
return
current = identity.region
if current == val:
return
if "LON" in (current, val):
# This is an outlier, as it has a separate auth
identity.region = val
elif key == "verify_ssl":
if not identity:
return
identity.verify_ssl = val | def function[set, parameter[self, key, val, env]]:
constant[
Changes the value for the setting specified by 'key' to the new value.
By default this will change the current environment, but you can change
values in other environments by passing the name of that environment as
the 'env' parameter.
]
if compare[name[env] is constant[None]] begin[:]
variable[env] assign[=] name[self].environment
variable[dct] assign[=] call[name[self]._settings][name[env]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[dct]] begin[:]
<ast.Raise object at 0x7da1b056f3d0>
call[name[dct]][name[key]] assign[=] name[val]
if compare[name[key] equal[==] constant[identity_type]] begin[:]
call[name[dct]][constant[identity_class]] assign[=] call[name[_import_identity], parameter[name[val]]] | keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[val] , identifier[env] = keyword[None] ):
literal[string]
keyword[if] identifier[env] keyword[is] keyword[None] :
identifier[env] = identifier[self] . identifier[environment]
keyword[else] :
keyword[if] identifier[env] keyword[not] keyword[in] identifier[self] . identifier[_settings] :
keyword[raise] identifier[exc] . identifier[EnvironmentNotFound] ( literal[string]
literal[string] % identifier[env] )
identifier[dct] = identifier[self] . identifier[_settings] [ identifier[env] ]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[dct] :
keyword[raise] identifier[exc] . identifier[InvalidSetting] ( literal[string] % identifier[key] )
identifier[dct] [ identifier[key] ]= identifier[val]
keyword[if] identifier[key] == literal[string] :
identifier[dct] [ literal[string] ]= identifier[_import_identity] ( identifier[val] )
keyword[elif] identifier[key] == literal[string] :
keyword[if] keyword[not] identifier[identity] :
keyword[return]
identifier[current] = identifier[identity] . identifier[region]
keyword[if] identifier[current] == identifier[val] :
keyword[return]
keyword[if] literal[string] keyword[in] ( identifier[current] , identifier[val] ):
identifier[identity] . identifier[region] = identifier[val]
keyword[elif] identifier[key] == literal[string] :
keyword[if] keyword[not] identifier[identity] :
keyword[return]
identifier[identity] . identifier[verify_ssl] = identifier[val] | def set(self, key, val, env=None):
"""
Changes the value for the setting specified by 'key' to the new value.
By default this will change the current environment, but you can change
values in other environments by passing the name of that environment as
the 'env' parameter.
"""
if env is None:
env = self.environment # depends on [control=['if'], data=['env']]
elif env not in self._settings:
raise exc.EnvironmentNotFound("There is no environment named '%s'." % env) # depends on [control=['if'], data=['env']]
dct = self._settings[env]
if key not in dct:
raise exc.InvalidSetting("The setting '%s' is not defined." % key) # depends on [control=['if'], data=['key']]
dct[key] = val
if key == 'identity_type':
# If setting the identity_type, also change the identity_class.
dct['identity_class'] = _import_identity(val) # depends on [control=['if'], data=[]]
elif key == 'region':
if not identity:
return # depends on [control=['if'], data=[]]
current = identity.region
if current == val:
return # depends on [control=['if'], data=[]]
if 'LON' in (current, val):
# This is an outlier, as it has a separate auth
identity.region = val # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif key == 'verify_ssl':
if not identity:
return # depends on [control=['if'], data=[]]
identity.verify_ssl = val # depends on [control=['if'], data=[]] |
def load_config_from_json(self):
"""load config from existing json connector files."""
c = self.config
self.log.debug("loading config from JSON")
# load from engine config
fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
cfg = json.loads(f.read())
key = cfg['exec_key']
# json gives unicode, Session.key wants bytes
c.Session.key = key.encode('ascii')
xport,addr = cfg['url'].split('://')
c.HubFactory.engine_transport = xport
ip,ports = addr.split(':')
c.HubFactory.engine_ip = ip
c.HubFactory.regport = int(ports)
self.location = cfg['location']
if not self.engine_ssh_server:
self.engine_ssh_server = cfg['ssh']
# load client config
fname = os.path.join(self.profile_dir.security_dir, self.client_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
cfg = json.loads(f.read())
assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys"
xport,addr = cfg['url'].split('://')
c.HubFactory.client_transport = xport
ip,ports = addr.split(':')
c.HubFactory.client_ip = ip
if not self.ssh_server:
self.ssh_server = cfg['ssh']
assert int(ports) == c.HubFactory.regport, "regport mismatch" | def function[load_config_from_json, parameter[self]]:
constant[load config from existing json connector files.]
variable[c] assign[=] name[self].config
call[name[self].log.debug, parameter[constant[loading config from JSON]]]
variable[fname] assign[=] call[name[os].path.join, parameter[name[self].profile_dir.security_dir, name[self].engine_json_file]]
call[name[self].log.info, parameter[constant[loading connection info from %s], name[fname]]]
with call[name[open], parameter[name[fname]]] begin[:]
variable[cfg] assign[=] call[name[json].loads, parameter[call[name[f].read, parameter[]]]]
variable[key] assign[=] call[name[cfg]][constant[exec_key]]
name[c].Session.key assign[=] call[name[key].encode, parameter[constant[ascii]]]
<ast.Tuple object at 0x7da18ede49d0> assign[=] call[call[name[cfg]][constant[url]].split, parameter[constant[://]]]
name[c].HubFactory.engine_transport assign[=] name[xport]
<ast.Tuple object at 0x7da18ede4880> assign[=] call[name[addr].split, parameter[constant[:]]]
name[c].HubFactory.engine_ip assign[=] name[ip]
name[c].HubFactory.regport assign[=] call[name[int], parameter[name[ports]]]
name[self].location assign[=] call[name[cfg]][constant[location]]
if <ast.UnaryOp object at 0x7da18ede5a20> begin[:]
name[self].engine_ssh_server assign[=] call[name[cfg]][constant[ssh]]
variable[fname] assign[=] call[name[os].path.join, parameter[name[self].profile_dir.security_dir, name[self].client_json_file]]
call[name[self].log.info, parameter[constant[loading connection info from %s], name[fname]]]
with call[name[open], parameter[name[fname]]] begin[:]
variable[cfg] assign[=] call[name[json].loads, parameter[call[name[f].read, parameter[]]]]
assert[compare[name[key] equal[==] call[name[cfg]][constant[exec_key]]]]
<ast.Tuple object at 0x7da20eb2b250> assign[=] call[call[name[cfg]][constant[url]].split, parameter[constant[://]]]
name[c].HubFactory.client_transport assign[=] name[xport]
<ast.Tuple object at 0x7da20eb294b0> assign[=] call[name[addr].split, parameter[constant[:]]]
name[c].HubFactory.client_ip assign[=] name[ip]
if <ast.UnaryOp object at 0x7da18ede5db0> begin[:]
name[self].ssh_server assign[=] call[name[cfg]][constant[ssh]]
assert[compare[call[name[int], parameter[name[ports]]] equal[==] name[c].HubFactory.regport]] | keyword[def] identifier[load_config_from_json] ( identifier[self] ):
literal[string]
identifier[c] = identifier[self] . identifier[config]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[profile_dir] . identifier[security_dir] , identifier[self] . identifier[engine_json_file] )
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[fname] )
keyword[with] identifier[open] ( identifier[fname] ) keyword[as] identifier[f] :
identifier[cfg] = identifier[json] . identifier[loads] ( identifier[f] . identifier[read] ())
identifier[key] = identifier[cfg] [ literal[string] ]
identifier[c] . identifier[Session] . identifier[key] = identifier[key] . identifier[encode] ( literal[string] )
identifier[xport] , identifier[addr] = identifier[cfg] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[c] . identifier[HubFactory] . identifier[engine_transport] = identifier[xport]
identifier[ip] , identifier[ports] = identifier[addr] . identifier[split] ( literal[string] )
identifier[c] . identifier[HubFactory] . identifier[engine_ip] = identifier[ip]
identifier[c] . identifier[HubFactory] . identifier[regport] = identifier[int] ( identifier[ports] )
identifier[self] . identifier[location] = identifier[cfg] [ literal[string] ]
keyword[if] keyword[not] identifier[self] . identifier[engine_ssh_server] :
identifier[self] . identifier[engine_ssh_server] = identifier[cfg] [ literal[string] ]
identifier[fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[profile_dir] . identifier[security_dir] , identifier[self] . identifier[client_json_file] )
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[fname] )
keyword[with] identifier[open] ( identifier[fname] ) keyword[as] identifier[f] :
identifier[cfg] = identifier[json] . identifier[loads] ( identifier[f] . identifier[read] ())
keyword[assert] identifier[key] == identifier[cfg] [ literal[string] ], literal[string]
identifier[xport] , identifier[addr] = identifier[cfg] [ literal[string] ]. identifier[split] ( literal[string] )
identifier[c] . identifier[HubFactory] . identifier[client_transport] = identifier[xport]
identifier[ip] , identifier[ports] = identifier[addr] . identifier[split] ( literal[string] )
identifier[c] . identifier[HubFactory] . identifier[client_ip] = identifier[ip]
keyword[if] keyword[not] identifier[self] . identifier[ssh_server] :
identifier[self] . identifier[ssh_server] = identifier[cfg] [ literal[string] ]
keyword[assert] identifier[int] ( identifier[ports] )== identifier[c] . identifier[HubFactory] . identifier[regport] , literal[string] | def load_config_from_json(self):
"""load config from existing json connector files."""
c = self.config
self.log.debug('loading config from JSON')
# load from engine config
fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file)
self.log.info('loading connection info from %s', fname)
with open(fname) as f:
cfg = json.loads(f.read()) # depends on [control=['with'], data=['f']]
key = cfg['exec_key']
# json gives unicode, Session.key wants bytes
c.Session.key = key.encode('ascii')
(xport, addr) = cfg['url'].split('://')
c.HubFactory.engine_transport = xport
(ip, ports) = addr.split(':')
c.HubFactory.engine_ip = ip
c.HubFactory.regport = int(ports)
self.location = cfg['location']
if not self.engine_ssh_server:
self.engine_ssh_server = cfg['ssh'] # depends on [control=['if'], data=[]]
# load client config
fname = os.path.join(self.profile_dir.security_dir, self.client_json_file)
self.log.info('loading connection info from %s', fname)
with open(fname) as f:
cfg = json.loads(f.read()) # depends on [control=['with'], data=['f']]
assert key == cfg['exec_key'], 'exec_key mismatch between engine and client keys'
(xport, addr) = cfg['url'].split('://')
c.HubFactory.client_transport = xport
(ip, ports) = addr.split(':')
c.HubFactory.client_ip = ip
if not self.ssh_server:
self.ssh_server = cfg['ssh'] # depends on [control=['if'], data=[]]
assert int(ports) == c.HubFactory.regport, 'regport mismatch' |
async def login_user(self, password, **kwds):
"""
This function handles the registration of the given user credentials in the database
"""
# find the matching user with the given email
user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data']
try:
# look for a matching entry in the local database
passwordEntry = self.model.select().where(
self.model.user == user_data[root_query()][0]['pk']
)[0]
# if we couldn't acess the id of the result
except (KeyError, IndexError) as e:
# yell loudly
raise RuntimeError('Could not find matching registered user')
# if the given password matches the stored hash
if passwordEntry and passwordEntry.password == password:
# the remote entry for the user
user = user_data[root_query()][0]
# then return a dictionary with the user and sessionToken
return {
'user': user,
'sessionToken': self._user_session_token(user)
}
# otherwise the passwords don't match
raise RuntimeError("Incorrect credentials") | <ast.AsyncFunctionDef object at 0x7da18f00ed70> | keyword[async] keyword[def] identifier[login_user] ( identifier[self] , identifier[password] ,** identifier[kwds] ):
literal[string]
identifier[user_data] =( keyword[await] identifier[self] . identifier[_get_matching_user] ( identifier[fields] = identifier[list] ( identifier[kwds] . identifier[keys] ()),** identifier[kwds] ))[ literal[string] ]
keyword[try] :
identifier[passwordEntry] = identifier[self] . identifier[model] . identifier[select] (). identifier[where] (
identifier[self] . identifier[model] . identifier[user] == identifier[user_data] [ identifier[root_query] ()][ literal[int] ][ literal[string] ]
)[ literal[int] ]
keyword[except] ( identifier[KeyError] , identifier[IndexError] ) keyword[as] identifier[e] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[passwordEntry] keyword[and] identifier[passwordEntry] . identifier[password] == identifier[password] :
identifier[user] = identifier[user_data] [ identifier[root_query] ()][ literal[int] ]
keyword[return] {
literal[string] : identifier[user] ,
literal[string] : identifier[self] . identifier[_user_session_token] ( identifier[user] )
}
keyword[raise] identifier[RuntimeError] ( literal[string] ) | async def login_user(self, password, **kwds):
"""
This function handles the registration of the given user credentials in the database
"""
# find the matching user with the given email
user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data']
try:
# look for a matching entry in the local database
passwordEntry = self.model.select().where(self.model.user == user_data[root_query()][0]['pk'])[0] # depends on [control=['try'], data=[]]
# if we couldn't acess the id of the result
except (KeyError, IndexError) as e:
# yell loudly
raise RuntimeError('Could not find matching registered user') # depends on [control=['except'], data=[]]
# if the given password matches the stored hash
if passwordEntry and passwordEntry.password == password:
# the remote entry for the user
user = user_data[root_query()][0]
# then return a dictionary with the user and sessionToken
return {'user': user, 'sessionToken': self._user_session_token(user)} # depends on [control=['if'], data=[]]
# otherwise the passwords don't match
raise RuntimeError('Incorrect credentials') |
def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http',
sslflags=None):
'''
Create an IIS Web Binding.
.. note::
This function only validates against the binding
ipaddress:port:hostheader combination, and will return True even if the
binding already exists with a different configuration. It will not
modify the configuration of an existing binding.
Args:
site (str): The IIS site name.
hostheader (str): The host header of the binding. Usually a hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
protocol (str): The application protocol of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
'''
protocol = six.text_type(protocol).lower()
name = _get_binding_info(hostheader, ipaddress, port)
if protocol not in _VALID_PROTOCOLS:
message = ("Invalid protocol '{0}' specified. Valid formats:"
' {1}').format(protocol, _VALID_PROTOCOLS)
raise SaltInvocationError(message)
if sslflags:
sslflags = int(sslflags)
if sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range:"
' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
current_bindings = list_bindings(site)
if name in current_bindings:
log.debug('Binding already present: %s', name)
return True
if sslflags:
ps_cmd = ['New-WebBinding',
'-Name', "'{0}'".format(site),
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(port),
'-Protocol', "'{0}'".format(protocol),
'-SslFlags', '{0}'.format(sslflags)]
else:
ps_cmd = ['New-WebBinding',
'-Name', "'{0}'".format(site),
'-HostHeader', "'{0}'".format(hostheader),
'-IpAddress', "'{0}'".format(ipaddress),
'-Port', "'{0}'".format(port),
'-Protocol', "'{0}'".format(protocol)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create binding: {0}\nError: {1}' \
''.format(site, cmd_ret['stderr'])
raise CommandExecutionError(msg)
if name in list_bindings(site):
log.debug('Binding created successfully: %s', site)
return True
log.error('Unable to create binding: %s', site)
return False | def function[create_binding, parameter[site, hostheader, ipaddress, port, protocol, sslflags]]:
constant[
Create an IIS Web Binding.
.. note::
This function only validates against the binding
ipaddress:port:hostheader combination, and will return True even if the
binding already exists with a different configuration. It will not
modify the configuration of an existing binding.
Args:
site (str): The IIS site name.
hostheader (str): The host header of the binding. Usually a hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
protocol (str): The application protocol of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
]
variable[protocol] assign[=] call[call[name[six].text_type, parameter[name[protocol]]].lower, parameter[]]
variable[name] assign[=] call[name[_get_binding_info], parameter[name[hostheader], name[ipaddress], name[port]]]
if compare[name[protocol] <ast.NotIn object at 0x7da2590d7190> name[_VALID_PROTOCOLS]] begin[:]
variable[message] assign[=] call[constant[Invalid protocol '{0}' specified. Valid formats: {1}].format, parameter[name[protocol], name[_VALID_PROTOCOLS]]]
<ast.Raise object at 0x7da1b1c07400>
if name[sslflags] begin[:]
variable[sslflags] assign[=] call[name[int], parameter[name[sslflags]]]
if compare[name[sslflags] <ast.NotIn object at 0x7da2590d7190> name[_VALID_SSL_FLAGS]] begin[:]
variable[message] assign[=] call[constant[Invalid sslflags '{0}' specified. Valid sslflags range: {1}..{2}].format, parameter[name[sslflags], call[name[_VALID_SSL_FLAGS]][constant[0]], call[name[_VALID_SSL_FLAGS]][<ast.UnaryOp object at 0x7da1b1c05de0>]]]
<ast.Raise object at 0x7da1b1c05f00>
variable[current_bindings] assign[=] call[name[list_bindings], parameter[name[site]]]
if compare[name[name] in name[current_bindings]] begin[:]
call[name[log].debug, parameter[constant[Binding already present: %s], name[name]]]
return[constant[True]]
if name[sslflags] begin[:]
variable[ps_cmd] assign[=] list[[<ast.Constant object at 0x7da1b1c043a0>, <ast.Constant object at 0x7da1b1c051e0>, <ast.Call object at 0x7da1b1c052d0>, <ast.Constant object at 0x7da1b1c07610>, <ast.Call object at 0x7da1b1c05c60>, <ast.Constant object at 0x7da1b1c05660>, <ast.Call object at 0x7da1b1c07100>, <ast.Constant object at 0x7da1b1c07670>, <ast.Call object at 0x7da1b1c055a0>, <ast.Constant object at 0x7da1b1c06260>, <ast.Call object at 0x7da1b1c06e60>, <ast.Constant object at 0x7da1b1c07970>, <ast.Call object at 0x7da1b1c04160>]]
variable[cmd_ret] assign[=] call[name[_srvmgr], parameter[name[ps_cmd]]]
if compare[call[name[cmd_ret]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
variable[msg] assign[=] call[constant[Unable to create binding: {0}
Error: {1}].format, parameter[name[site], call[name[cmd_ret]][constant[stderr]]]]
<ast.Raise object at 0x7da1b1f837c0>
if compare[name[name] in call[name[list_bindings], parameter[name[site]]]] begin[:]
call[name[log].debug, parameter[constant[Binding created successfully: %s], name[site]]]
return[constant[True]]
call[name[log].error, parameter[constant[Unable to create binding: %s], name[site]]]
return[constant[False]] | keyword[def] identifier[create_binding] ( identifier[site] , identifier[hostheader] = literal[string] , identifier[ipaddress] = literal[string] , identifier[port] = literal[int] , identifier[protocol] = literal[string] ,
identifier[sslflags] = keyword[None] ):
literal[string]
identifier[protocol] = identifier[six] . identifier[text_type] ( identifier[protocol] ). identifier[lower] ()
identifier[name] = identifier[_get_binding_info] ( identifier[hostheader] , identifier[ipaddress] , identifier[port] )
keyword[if] identifier[protocol] keyword[not] keyword[in] identifier[_VALID_PROTOCOLS] :
identifier[message] =( literal[string]
literal[string] ). identifier[format] ( identifier[protocol] , identifier[_VALID_PROTOCOLS] )
keyword[raise] identifier[SaltInvocationError] ( identifier[message] )
keyword[if] identifier[sslflags] :
identifier[sslflags] = identifier[int] ( identifier[sslflags] )
keyword[if] identifier[sslflags] keyword[not] keyword[in] identifier[_VALID_SSL_FLAGS] :
identifier[message] =( literal[string]
literal[string] ). identifier[format] ( identifier[sslflags] , identifier[_VALID_SSL_FLAGS] [ literal[int] ], identifier[_VALID_SSL_FLAGS] [- literal[int] ])
keyword[raise] identifier[SaltInvocationError] ( identifier[message] )
identifier[current_bindings] = identifier[list_bindings] ( identifier[site] )
keyword[if] identifier[name] keyword[in] identifier[current_bindings] :
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] keyword[True]
keyword[if] identifier[sslflags] :
identifier[ps_cmd] =[ literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[site] ),
literal[string] , literal[string] . identifier[format] ( identifier[hostheader] ),
literal[string] , literal[string] . identifier[format] ( identifier[ipaddress] ),
literal[string] , literal[string] . identifier[format] ( identifier[port] ),
literal[string] , literal[string] . identifier[format] ( identifier[protocol] ),
literal[string] , literal[string] . identifier[format] ( identifier[sslflags] )]
keyword[else] :
identifier[ps_cmd] =[ literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[site] ),
literal[string] , literal[string] . identifier[format] ( identifier[hostheader] ),
literal[string] , literal[string] . identifier[format] ( identifier[ipaddress] ),
literal[string] , literal[string] . identifier[format] ( identifier[port] ),
literal[string] , literal[string] . identifier[format] ( identifier[protocol] )]
identifier[cmd_ret] = identifier[_srvmgr] ( identifier[ps_cmd] )
keyword[if] identifier[cmd_ret] [ literal[string] ]!= literal[int] :
identifier[msg] = literal[string] literal[string] . identifier[format] ( identifier[site] , identifier[cmd_ret] [ literal[string] ])
keyword[raise] identifier[CommandExecutionError] ( identifier[msg] )
keyword[if] identifier[name] keyword[in] identifier[list_bindings] ( identifier[site] ):
identifier[log] . identifier[debug] ( literal[string] , identifier[site] )
keyword[return] keyword[True]
identifier[log] . identifier[error] ( literal[string] , identifier[site] )
keyword[return] keyword[False] | def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http', sslflags=None):
"""
Create an IIS Web Binding.
.. note::
This function only validates against the binding
ipaddress:port:hostheader combination, and will return True even if the
binding already exists with a different configuration. It will not
modify the configuration of an existing binding.
Args:
site (str): The IIS site name.
hostheader (str): The host header of the binding. Usually a hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
protocol (str): The application protocol of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
"""
protocol = six.text_type(protocol).lower()
name = _get_binding_info(hostheader, ipaddress, port)
if protocol not in _VALID_PROTOCOLS:
message = "Invalid protocol '{0}' specified. Valid formats: {1}".format(protocol, _VALID_PROTOCOLS)
raise SaltInvocationError(message) # depends on [control=['if'], data=['protocol', '_VALID_PROTOCOLS']]
if sslflags:
sslflags = int(sslflags)
if sslflags not in _VALID_SSL_FLAGS:
message = "Invalid sslflags '{0}' specified. Valid sslflags range: {1}..{2}".format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message) # depends on [control=['if'], data=['sslflags', '_VALID_SSL_FLAGS']] # depends on [control=['if'], data=[]]
current_bindings = list_bindings(site)
if name in current_bindings:
log.debug('Binding already present: %s', name)
return True # depends on [control=['if'], data=['name']]
if sslflags:
ps_cmd = ['New-WebBinding', '-Name', "'{0}'".format(site), '-HostHeader', "'{0}'".format(hostheader), '-IpAddress', "'{0}'".format(ipaddress), '-Port', "'{0}'".format(port), '-Protocol', "'{0}'".format(protocol), '-SslFlags', '{0}'.format(sslflags)] # depends on [control=['if'], data=[]]
else:
ps_cmd = ['New-WebBinding', '-Name', "'{0}'".format(site), '-HostHeader', "'{0}'".format(hostheader), '-IpAddress', "'{0}'".format(ipaddress), '-Port', "'{0}'".format(port), '-Protocol', "'{0}'".format(protocol)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create binding: {0}\nError: {1}'.format(site, cmd_ret['stderr'])
raise CommandExecutionError(msg) # depends on [control=['if'], data=[]]
if name in list_bindings(site):
log.debug('Binding created successfully: %s', site)
return True # depends on [control=['if'], data=[]]
log.error('Unable to create binding: %s', site)
return False |
def create_context(id_type=None, env=None, username=None, password=None,
tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None):
"""
Returns an instance of the specified identity class, or if none is
specified, an instance of the current setting for 'identity_class'.
You may optionally set the environment by passing the name of that
environment in the 'env' parameter.
"""
if env:
set_environment(env)
return _create_identity(id_type=id_type, username=username,
password=password, tenant_id=tenant_id, tenant_name=tenant_name,
api_key=api_key, verify_ssl=verify_ssl, return_context=True) | def function[create_context, parameter[id_type, env, username, password, tenant_id, tenant_name, api_key, verify_ssl]]:
constant[
Returns an instance of the specified identity class, or if none is
specified, an instance of the current setting for 'identity_class'.
You may optionally set the environment by passing the name of that
environment in the 'env' parameter.
]
if name[env] begin[:]
call[name[set_environment], parameter[name[env]]]
return[call[name[_create_identity], parameter[]]] | keyword[def] identifier[create_context] ( identifier[id_type] = keyword[None] , identifier[env] = keyword[None] , identifier[username] = keyword[None] , identifier[password] = keyword[None] ,
identifier[tenant_id] = keyword[None] , identifier[tenant_name] = keyword[None] , identifier[api_key] = keyword[None] , identifier[verify_ssl] = keyword[None] ):
literal[string]
keyword[if] identifier[env] :
identifier[set_environment] ( identifier[env] )
keyword[return] identifier[_create_identity] ( identifier[id_type] = identifier[id_type] , identifier[username] = identifier[username] ,
identifier[password] = identifier[password] , identifier[tenant_id] = identifier[tenant_id] , identifier[tenant_name] = identifier[tenant_name] ,
identifier[api_key] = identifier[api_key] , identifier[verify_ssl] = identifier[verify_ssl] , identifier[return_context] = keyword[True] ) | def create_context(id_type=None, env=None, username=None, password=None, tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None):
"""
Returns an instance of the specified identity class, or if none is
specified, an instance of the current setting for 'identity_class'.
You may optionally set the environment by passing the name of that
environment in the 'env' parameter.
"""
if env:
set_environment(env) # depends on [control=['if'], data=[]]
return _create_identity(id_type=id_type, username=username, password=password, tenant_id=tenant_id, tenant_name=tenant_name, api_key=api_key, verify_ssl=verify_ssl, return_context=True) |
def summary_stats(x, y, nm=None):
"""
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
"""
# create datafrane for results
if isinstance(nm, str):
nm = [nm]
# cols = pd.MultiIndex.from_arrays([['', 'Pairwise', 'Pairwise', cat, cat, cat, cat],
# ['N', 'W', 'p', 'Median', 'IQR', 'W', 'p']])
# cols = ['Median', 'IQR', 'CI95', 'L95', 'LQ', 'UQ', 'U95', 'N',
# 'Wilcoxon_stat', 'Wilcoxon_p',
# 'KS_stat', 'KS_p',
# 'LR_slope', 'LR_intercept', 'LR_slope_tvalue', 'LR_intercept_tvalue', 'LR_slope_p', 'LR_intercept_p', 'LR_R2adj']
# out = pd.DataFrame(index=nm, columns=cols)
cols = pd.MultiIndex.from_tuples([('Residual Summary', 'N'),
('Residual Summary', 'Median'),
('Residual Summary', 'LQ'),
('Residual Summary', 'IQR'),
('Residual Summary', 'UQ'),
('Residual Regression', 'Slope'),
('Residual Regression', 'Slope t'),
('Residual Regression', 'Slope p'),
('Residual Regression', 'Intercept'),
('Residual Regression', 'Intercept t'),
('Residual Regression', 'Intercept p'),
('Residual Regression', 'R2'),
('Kolmogorov-Smirnov', 'KS'),
('Kolmogorov-Smirnov', 'p')])
out = pd.DataFrame(index=nm, columns=cols)
# remove nan values
ind = ~(np.isnan(x) | np.isnan(y))
x = x[ind]
y = y[ind]
# calculate residuals
r = y - x
# summary statistics
cat = 'Residual Summary'
out.loc[:, (cat, 'N')] = len(x)
out.loc[:, (cat, 'Median')] = np.median(r)
out.loc[:, [(cat, 'LQ'), (cat, 'UQ')]] = np.percentile(r, [25, 75])
out.loc[:, (cat, 'IQR')] = out.loc[:, (cat, 'UQ')] - out.loc[:, (cat, 'LQ')]
# non-paired test for same distribution
cat = 'Kolmogorov-Smirnov'
ks = stats.ks_2samp(x, y)
out.loc[:, (cat, 'KS')] = ks.statistic
out.loc[:, (cat, 'p')] = ks.pvalue
# regression analysis of residuals - slope should be 0, intercept should be 0
cat = 'Residual Regression'
X = sm.add_constant(x)
reg = sm.OLS(r, X, missing='drop')
fit = reg.fit()
out.loc[:, [(cat, 'Intercept'), (cat, 'Slope')]] = fit.params
out.loc[:, [(cat, 'Intercept t'), (cat, 'Slope t')]] = fit.tvalues
out.loc[:, (cat, 'R2')] = fit.rsquared
out.loc[:, [(cat, 'Intercept p'), (cat, 'Slope p')]] = fit.pvalues
return out | def function[summary_stats, parameter[x, y, nm]]:
constant[
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
]
if call[name[isinstance], parameter[name[nm], name[str]]] begin[:]
variable[nm] assign[=] list[[<ast.Name object at 0x7da1b0150490>]]
variable[cols] assign[=] call[name[pd].MultiIndex.from_tuples, parameter[list[[<ast.Tuple object at 0x7da1b0150610>, <ast.Tuple object at 0x7da1b01506a0>, <ast.Tuple object at 0x7da1b0150730>, <ast.Tuple object at 0x7da1b01507c0>, <ast.Tuple object at 0x7da1b0150850>, <ast.Tuple object at 0x7da1b01508e0>, <ast.Tuple object at 0x7da1b0150970>, <ast.Tuple object at 0x7da1b0150a00>, <ast.Tuple object at 0x7da1b0150a90>, <ast.Tuple object at 0x7da1b0150b20>, <ast.Tuple object at 0x7da1b0150bb0>, <ast.Tuple object at 0x7da1b0150c40>, <ast.Tuple object at 0x7da1b0150cd0>, <ast.Tuple object at 0x7da1b0150d60>]]]]
variable[out] assign[=] call[name[pd].DataFrame, parameter[]]
variable[ind] assign[=] <ast.UnaryOp object at 0x7da1b0151000>
variable[x] assign[=] call[name[x]][name[ind]]
variable[y] assign[=] call[name[y]][name[ind]]
variable[r] assign[=] binary_operation[name[y] - name[x]]
variable[cat] assign[=] constant[Residual Summary]
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0151630>, <ast.Tuple object at 0x7da1b0151660>]]] assign[=] call[name[len], parameter[name[x]]]
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0151870>, <ast.Tuple object at 0x7da1b01518a0>]]] assign[=] call[name[np].median, parameter[name[r]]]
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0152380>, <ast.List object at 0x7da1b01523b0>]]] assign[=] call[name[np].percentile, parameter[name[r], list[[<ast.Constant object at 0x7da1b0151b10>, <ast.Constant object at 0x7da1b0151b40>]]]]
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0151c60>, <ast.Tuple object at 0x7da1b0151c90>]]] assign[=] binary_operation[call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0151e10>, <ast.Tuple object at 0x7da1b0151e40>]]] - call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0151f90>, <ast.Tuple object at 0x7da1b0151fc0>]]]]
variable[cat] assign[=] constant[Kolmogorov-Smirnov]
variable[ks] assign[=] call[name[stats].ks_2samp, parameter[name[x], name[y]]]
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0153e80>, <ast.Tuple object at 0x7da1b0153e50>]]] assign[=] name[ks].statistic
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0153c70>, <ast.Tuple object at 0x7da1b0153c40>]]] assign[=] name[ks].pvalue
variable[cat] assign[=] constant[Residual Regression]
variable[X] assign[=] call[name[sm].add_constant, parameter[name[x]]]
variable[reg] assign[=] call[name[sm].OLS, parameter[name[r], name[X]]]
variable[fit] assign[=] call[name[reg].fit, parameter[]]
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0152f50>, <ast.List object at 0x7da1b0152f20>]]] assign[=] name[fit].params
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0152c80>, <ast.List object at 0x7da1b0152c50>]]] assign[=] name[fit].tvalues
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b01529b0>, <ast.Tuple object at 0x7da1b0152980>]]] assign[=] name[fit].rsquared
call[name[out].loc][tuple[[<ast.Slice object at 0x7da1b0152560>, <ast.List object at 0x7da1b0152590>]]] assign[=] name[fit].pvalues
return[name[out]] | keyword[def] identifier[summary_stats] ( identifier[x] , identifier[y] , identifier[nm] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[nm] , identifier[str] ):
identifier[nm] =[ identifier[nm] ]
identifier[cols] = identifier[pd] . identifier[MultiIndex] . identifier[from_tuples] ([( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] )])
identifier[out] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[nm] , identifier[columns] = identifier[cols] )
identifier[ind] =~( identifier[np] . identifier[isnan] ( identifier[x] )| identifier[np] . identifier[isnan] ( identifier[y] ))
identifier[x] = identifier[x] [ identifier[ind] ]
identifier[y] = identifier[y] [ identifier[ind] ]
identifier[r] = identifier[y] - identifier[x]
identifier[cat] = literal[string]
identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]= identifier[len] ( identifier[x] )
identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]= identifier[np] . identifier[median] ( identifier[r] )
identifier[out] . identifier[loc] [:,[( identifier[cat] , literal[string] ),( identifier[cat] , literal[string] )]]= identifier[np] . identifier[percentile] ( identifier[r] ,[ literal[int] , literal[int] ])
identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]= identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]- identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]
identifier[cat] = literal[string]
identifier[ks] = identifier[stats] . identifier[ks_2samp] ( identifier[x] , identifier[y] )
identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]= identifier[ks] . identifier[statistic]
identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]= identifier[ks] . identifier[pvalue]
identifier[cat] = literal[string]
identifier[X] = identifier[sm] . identifier[add_constant] ( identifier[x] )
identifier[reg] = identifier[sm] . identifier[OLS] ( identifier[r] , identifier[X] , identifier[missing] = literal[string] )
identifier[fit] = identifier[reg] . identifier[fit] ()
identifier[out] . identifier[loc] [:,[( identifier[cat] , literal[string] ),( identifier[cat] , literal[string] )]]= identifier[fit] . identifier[params]
identifier[out] . identifier[loc] [:,[( identifier[cat] , literal[string] ),( identifier[cat] , literal[string] )]]= identifier[fit] . identifier[tvalues]
identifier[out] . identifier[loc] [:,( identifier[cat] , literal[string] )]= identifier[fit] . identifier[rsquared]
identifier[out] . identifier[loc] [:,[( identifier[cat] , literal[string] ),( identifier[cat] , literal[string] )]]= identifier[fit] . identifier[pvalues]
keyword[return] identifier[out] | def summary_stats(x, y, nm=None):
"""
Compute summary statistics for paired x, y data.
Tests
-----
Parameters
----------
x, y : array-like
Data to compare
nm : str (optional)
Index value of created dataframe.
Returns
-------
pandas dataframe of statistics.
"""
# create datafrane for results
if isinstance(nm, str):
nm = [nm] # depends on [control=['if'], data=[]]
# cols = pd.MultiIndex.from_arrays([['', 'Pairwise', 'Pairwise', cat, cat, cat, cat],
# ['N', 'W', 'p', 'Median', 'IQR', 'W', 'p']])
# cols = ['Median', 'IQR', 'CI95', 'L95', 'LQ', 'UQ', 'U95', 'N',
# 'Wilcoxon_stat', 'Wilcoxon_p',
# 'KS_stat', 'KS_p',
# 'LR_slope', 'LR_intercept', 'LR_slope_tvalue', 'LR_intercept_tvalue', 'LR_slope_p', 'LR_intercept_p', 'LR_R2adj']
# out = pd.DataFrame(index=nm, columns=cols)
cols = pd.MultiIndex.from_tuples([('Residual Summary', 'N'), ('Residual Summary', 'Median'), ('Residual Summary', 'LQ'), ('Residual Summary', 'IQR'), ('Residual Summary', 'UQ'), ('Residual Regression', 'Slope'), ('Residual Regression', 'Slope t'), ('Residual Regression', 'Slope p'), ('Residual Regression', 'Intercept'), ('Residual Regression', 'Intercept t'), ('Residual Regression', 'Intercept p'), ('Residual Regression', 'R2'), ('Kolmogorov-Smirnov', 'KS'), ('Kolmogorov-Smirnov', 'p')])
out = pd.DataFrame(index=nm, columns=cols)
# remove nan values
ind = ~(np.isnan(x) | np.isnan(y))
x = x[ind]
y = y[ind]
# calculate residuals
r = y - x
# summary statistics
cat = 'Residual Summary'
out.loc[:, (cat, 'N')] = len(x)
out.loc[:, (cat, 'Median')] = np.median(r)
out.loc[:, [(cat, 'LQ'), (cat, 'UQ')]] = np.percentile(r, [25, 75])
out.loc[:, (cat, 'IQR')] = out.loc[:, (cat, 'UQ')] - out.loc[:, (cat, 'LQ')]
# non-paired test for same distribution
cat = 'Kolmogorov-Smirnov'
ks = stats.ks_2samp(x, y)
out.loc[:, (cat, 'KS')] = ks.statistic
out.loc[:, (cat, 'p')] = ks.pvalue
# regression analysis of residuals - slope should be 0, intercept should be 0
cat = 'Residual Regression'
X = sm.add_constant(x)
reg = sm.OLS(r, X, missing='drop')
fit = reg.fit()
out.loc[:, [(cat, 'Intercept'), (cat, 'Slope')]] = fit.params
out.loc[:, [(cat, 'Intercept t'), (cat, 'Slope t')]] = fit.tvalues
out.loc[:, (cat, 'R2')] = fit.rsquared
out.loc[:, [(cat, 'Intercept p'), (cat, 'Slope p')]] = fit.pvalues
return out |
def _get_elements(self, source):
"""
Returns the list of HtmlElements for the source
:param source: The source list to parse
:type source: list
:returns: A list of HtmlElements
:rtype: list
"""
return list(chain(*[self.tree.xpath(xpath) for xpath in source])) | def function[_get_elements, parameter[self, source]]:
constant[
Returns the list of HtmlElements for the source
:param source: The source list to parse
:type source: list
:returns: A list of HtmlElements
:rtype: list
]
return[call[name[list], parameter[call[name[chain], parameter[<ast.Starred object at 0x7da204346320>]]]]] | keyword[def] identifier[_get_elements] ( identifier[self] , identifier[source] ):
literal[string]
keyword[return] identifier[list] ( identifier[chain] (*[ identifier[self] . identifier[tree] . identifier[xpath] ( identifier[xpath] ) keyword[for] identifier[xpath] keyword[in] identifier[source] ])) | def _get_elements(self, source):
"""
Returns the list of HtmlElements for the source
:param source: The source list to parse
:type source: list
:returns: A list of HtmlElements
:rtype: list
"""
return list(chain(*[self.tree.xpath(xpath) for xpath in source])) |
def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
"""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
return False
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
return False
# if the current line doesnt have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match:
return False
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True | def function[_is_unindentor, parameter[self, line]]:
constant[return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
]
if compare[call[name[len], parameter[name[self].indent_detail]] equal[==] constant[0]] begin[:]
return[constant[False]]
variable[indentor] assign[=] call[name[self].indent_detail][<ast.UnaryOp object at 0x7da1b1d06cb0>]
if compare[name[indentor] is constant[None]] begin[:]
return[constant[False]]
variable[match] assign[=] call[name[re].match, parameter[constant[^\s*(else|elif|except|finally).*\:], name[line]]]
if <ast.UnaryOp object at 0x7da1b1d05bd0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_is_unindentor] ( identifier[self] , identifier[line] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[indent_detail] )== literal[int] :
keyword[return] keyword[False]
identifier[indentor] = identifier[self] . identifier[indent_detail] [- literal[int] ]
keyword[if] identifier[indentor] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[line] )
keyword[if] keyword[not] identifier[match] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
"""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
return False # depends on [control=['if'], data=[]]
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
return False # depends on [control=['if'], data=[]]
# if the current line doesnt have one of the "unindentor" keywords,
# return False
match = re.match('^\\s*(else|elif|except|finally).*\\:', line)
if not match:
return False # depends on [control=['if'], data=[]]
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True |
def parse_options(args):
""" Parses command line options """
usage = 'usage: %prog [options] visio_filename image_filename'
parser = OptionParser(usage=usage)
parser.add_option('-p', '--page', action='store',
type='int', dest='pagenum',
help='pick a page by page number')
parser.add_option('-n', '--name', action='store',
type='string', dest='pagename',
help='pick a page by page name')
options, argv = parser.parse_args(args)
if options.pagenum and options.pagename:
parser.error('options --page and --name are mutually exclusive')
if len(argv) != 2:
parser.print_usage(sys.stderr)
parser.exit()
output_ext = os.path.splitext(argv[1])[1].lower()
if output_ext not in ('.gif', '.jpg', '.png'):
parser.error('Unsupported image format: %s' % argv[1])
return options, argv | def function[parse_options, parameter[args]]:
constant[ Parses command line options ]
variable[usage] assign[=] constant[usage: %prog [options] visio_filename image_filename]
variable[parser] assign[=] call[name[OptionParser], parameter[]]
call[name[parser].add_option, parameter[constant[-p], constant[--page]]]
call[name[parser].add_option, parameter[constant[-n], constant[--name]]]
<ast.Tuple object at 0x7da18c4cd2a0> assign[=] call[name[parser].parse_args, parameter[name[args]]]
if <ast.BoolOp object at 0x7da18c4ccdc0> begin[:]
call[name[parser].error, parameter[constant[options --page and --name are mutually exclusive]]]
if compare[call[name[len], parameter[name[argv]]] not_equal[!=] constant[2]] begin[:]
call[name[parser].print_usage, parameter[name[sys].stderr]]
call[name[parser].exit, parameter[]]
variable[output_ext] assign[=] call[call[call[name[os].path.splitext, parameter[call[name[argv]][constant[1]]]]][constant[1]].lower, parameter[]]
if compare[name[output_ext] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da18c4cf730>, <ast.Constant object at 0x7da18c4cc250>, <ast.Constant object at 0x7da18c4cc4f0>]]] begin[:]
call[name[parser].error, parameter[binary_operation[constant[Unsupported image format: %s] <ast.Mod object at 0x7da2590d6920> call[name[argv]][constant[1]]]]]
return[tuple[[<ast.Name object at 0x7da18c4cf820>, <ast.Name object at 0x7da18c4ce0e0>]]] | keyword[def] identifier[parse_options] ( identifier[args] ):
literal[string]
identifier[usage] = literal[string]
identifier[parser] = identifier[OptionParser] ( identifier[usage] = identifier[usage] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[type] = literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[type] = literal[string] , identifier[dest] = literal[string] ,
identifier[help] = literal[string] )
identifier[options] , identifier[argv] = identifier[parser] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[options] . identifier[pagenum] keyword[and] identifier[options] . identifier[pagename] :
identifier[parser] . identifier[error] ( literal[string] )
keyword[if] identifier[len] ( identifier[argv] )!= literal[int] :
identifier[parser] . identifier[print_usage] ( identifier[sys] . identifier[stderr] )
identifier[parser] . identifier[exit] ()
identifier[output_ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[argv] [ literal[int] ])[ literal[int] ]. identifier[lower] ()
keyword[if] identifier[output_ext] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[parser] . identifier[error] ( literal[string] % identifier[argv] [ literal[int] ])
keyword[return] identifier[options] , identifier[argv] | def parse_options(args):
""" Parses command line options """
usage = 'usage: %prog [options] visio_filename image_filename'
parser = OptionParser(usage=usage)
parser.add_option('-p', '--page', action='store', type='int', dest='pagenum', help='pick a page by page number')
parser.add_option('-n', '--name', action='store', type='string', dest='pagename', help='pick a page by page name')
(options, argv) = parser.parse_args(args)
if options.pagenum and options.pagename:
parser.error('options --page and --name are mutually exclusive') # depends on [control=['if'], data=[]]
if len(argv) != 2:
parser.print_usage(sys.stderr)
parser.exit() # depends on [control=['if'], data=[]]
output_ext = os.path.splitext(argv[1])[1].lower()
if output_ext not in ('.gif', '.jpg', '.png'):
parser.error('Unsupported image format: %s' % argv[1]) # depends on [control=['if'], data=[]]
return (options, argv) |
def run_step(context):
"""Simple echo. Outputs context['echoMe'].
Args:
context: dictionary-like. context is mandatory.
context must contain key 'echoMe'
context['echoMe'] will echo the value to logger.
This logger could well be stdout.
When you execute the pipeline, it should look something like this:
pypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser.
"""
logger.debug("started")
assert context, ("context must be set for echo. Did you set "
"'echoMe=text here'?")
context.assert_key_exists('echoMe', __name__)
if isinstance(context['echoMe'], str):
val = context.get_formatted('echoMe')
else:
val = context['echoMe']
logger.info(val)
logger.debug("done") | def function[run_step, parameter[context]]:
constant[Simple echo. Outputs context['echoMe'].
Args:
context: dictionary-like. context is mandatory.
context must contain key 'echoMe'
context['echoMe'] will echo the value to logger.
This logger could well be stdout.
When you execute the pipeline, it should look something like this:
pypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser.
]
call[name[logger].debug, parameter[constant[started]]]
assert[name[context]]
call[name[context].assert_key_exists, parameter[constant[echoMe], name[__name__]]]
if call[name[isinstance], parameter[call[name[context]][constant[echoMe]], name[str]]] begin[:]
variable[val] assign[=] call[name[context].get_formatted, parameter[constant[echoMe]]]
call[name[logger].info, parameter[name[val]]]
call[name[logger].debug, parameter[constant[done]]] | keyword[def] identifier[run_step] ( identifier[context] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] )
keyword[assert] identifier[context] ,( literal[string]
literal[string] )
identifier[context] . identifier[assert_key_exists] ( literal[string] , identifier[__name__] )
keyword[if] identifier[isinstance] ( identifier[context] [ literal[string] ], identifier[str] ):
identifier[val] = identifier[context] . identifier[get_formatted] ( literal[string] )
keyword[else] :
identifier[val] = identifier[context] [ literal[string] ]
identifier[logger] . identifier[info] ( identifier[val] )
identifier[logger] . identifier[debug] ( literal[string] ) | def run_step(context):
"""Simple echo. Outputs context['echoMe'].
Args:
context: dictionary-like. context is mandatory.
context must contain key 'echoMe'
context['echoMe'] will echo the value to logger.
This logger could well be stdout.
When you execute the pipeline, it should look something like this:
pypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser.
"""
logger.debug('started')
assert context, "context must be set for echo. Did you set 'echoMe=text here'?"
context.assert_key_exists('echoMe', __name__)
if isinstance(context['echoMe'], str):
val = context.get_formatted('echoMe') # depends on [control=['if'], data=[]]
else:
val = context['echoMe']
logger.info(val)
logger.debug('done') |
def get(self, key):
"""
Returns an address by user controlled input ID
:param key: an input_id used to tag a lookup address
:return: a matching Address
"""
try:
return self[self.id_lookup.get(key)]
except TypeError:
raise KeyError | def function[get, parameter[self, key]]:
constant[
Returns an address by user controlled input ID
:param key: an input_id used to tag a lookup address
:return: a matching Address
]
<ast.Try object at 0x7da1b235f010> | keyword[def] identifier[get] ( identifier[self] , identifier[key] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] [ identifier[self] . identifier[id_lookup] . identifier[get] ( identifier[key] )]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[KeyError] | def get(self, key):
"""
Returns an address by user controlled input ID
:param key: an input_id used to tag a lookup address
:return: a matching Address
"""
try:
return self[self.id_lookup.get(key)] # depends on [control=['try'], data=[]]
except TypeError:
raise KeyError # depends on [control=['except'], data=[]] |
def _setEndpoint(self, location):
'''
Set the endpoint after when Salesforce returns the URL after successful login()
'''
# suds 0.3.7+ supports multiple wsdl services, but breaks setlocation :(
# see https://fedorahosted.org/suds/ticket/261
try:
self._sforce.set_options(location = location)
except:
self._sforce.wsdl.service.setlocation(location)
self._location = location | def function[_setEndpoint, parameter[self, location]]:
constant[
Set the endpoint after when Salesforce returns the URL after successful login()
]
<ast.Try object at 0x7da1b28dc130>
name[self]._location assign[=] name[location] | keyword[def] identifier[_setEndpoint] ( identifier[self] , identifier[location] ):
literal[string]
keyword[try] :
identifier[self] . identifier[_sforce] . identifier[set_options] ( identifier[location] = identifier[location] )
keyword[except] :
identifier[self] . identifier[_sforce] . identifier[wsdl] . identifier[service] . identifier[setlocation] ( identifier[location] )
identifier[self] . identifier[_location] = identifier[location] | def _setEndpoint(self, location):
"""
Set the endpoint after when Salesforce returns the URL after successful login()
"""
# suds 0.3.7+ supports multiple wsdl services, but breaks setlocation :(
# see https://fedorahosted.org/suds/ticket/261
try:
self._sforce.set_options(location=location) # depends on [control=['try'], data=[]]
except:
self._sforce.wsdl.service.setlocation(location) # depends on [control=['except'], data=[]]
self._location = location |
def delete(self, key):
'''Removes the object named by `key`.
Args:
key: Key naming the object to remove.
'''
path = self.object_path(key)
if os.path.exists(path):
os.remove(path) | def function[delete, parameter[self, key]]:
constant[Removes the object named by `key`.
Args:
key: Key naming the object to remove.
]
variable[path] assign[=] call[name[self].object_path, parameter[name[key]]]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
call[name[os].remove, parameter[name[path]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[key] ):
literal[string]
identifier[path] = identifier[self] . identifier[object_path] ( identifier[key] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[os] . identifier[remove] ( identifier[path] ) | def delete(self, key):
"""Removes the object named by `key`.
Args:
key: Key naming the object to remove.
"""
path = self.object_path(key)
if os.path.exists(path):
os.remove(path) # depends on [control=['if'], data=[]] |
def set_mouse_button_callback(window, cbfun):
"""
Sets the mouse button callback.
Wrapper for:
GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _mouse_button_callback_repository:
previous_callback = _mouse_button_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWmousebuttonfun(cbfun)
_mouse_button_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetMouseButtonCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] | def function[set_mouse_button_callback, parameter[window, cbfun]]:
constant[
Sets the mouse button callback.
Wrapper for:
GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun cbfun);
]
variable[window_addr] assign[=] call[name[ctypes].cast, parameter[call[name[ctypes].pointer, parameter[name[window]]], call[name[ctypes].POINTER, parameter[name[ctypes].c_long]]]].contents.value
if compare[name[window_addr] in name[_mouse_button_callback_repository]] begin[:]
variable[previous_callback] assign[=] call[name[_mouse_button_callback_repository]][name[window_addr]]
if compare[name[cbfun] is constant[None]] begin[:]
variable[cbfun] assign[=] constant[0]
variable[c_cbfun] assign[=] call[name[_GLFWmousebuttonfun], parameter[name[cbfun]]]
call[name[_mouse_button_callback_repository]][name[window_addr]] assign[=] tuple[[<ast.Name object at 0x7da18bc70730>, <ast.Name object at 0x7da18bc73be0>]]
variable[cbfun] assign[=] name[c_cbfun]
call[name[_glfw].glfwSetMouseButtonCallback, parameter[name[window], name[cbfun]]]
if <ast.BoolOp object at 0x7da18bc73e20> begin[:]
return[call[name[previous_callback]][constant[0]]] | keyword[def] identifier[set_mouse_button_callback] ( identifier[window] , identifier[cbfun] ):
literal[string]
identifier[window_addr] = identifier[ctypes] . identifier[cast] ( identifier[ctypes] . identifier[pointer] ( identifier[window] ),
identifier[ctypes] . identifier[POINTER] ( identifier[ctypes] . identifier[c_long] )). identifier[contents] . identifier[value]
keyword[if] identifier[window_addr] keyword[in] identifier[_mouse_button_callback_repository] :
identifier[previous_callback] = identifier[_mouse_button_callback_repository] [ identifier[window_addr] ]
keyword[else] :
identifier[previous_callback] = keyword[None]
keyword[if] identifier[cbfun] keyword[is] keyword[None] :
identifier[cbfun] = literal[int]
identifier[c_cbfun] = identifier[_GLFWmousebuttonfun] ( identifier[cbfun] )
identifier[_mouse_button_callback_repository] [ identifier[window_addr] ]=( identifier[cbfun] , identifier[c_cbfun] )
identifier[cbfun] = identifier[c_cbfun]
identifier[_glfw] . identifier[glfwSetMouseButtonCallback] ( identifier[window] , identifier[cbfun] )
keyword[if] identifier[previous_callback] keyword[is] keyword[not] keyword[None] keyword[and] identifier[previous_callback] [ literal[int] ]!= literal[int] :
keyword[return] identifier[previous_callback] [ literal[int] ] | def set_mouse_button_callback(window, cbfun):
"""
Sets the mouse button callback.
Wrapper for:
GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _mouse_button_callback_repository:
previous_callback = _mouse_button_callback_repository[window_addr] # depends on [control=['if'], data=['window_addr', '_mouse_button_callback_repository']]
else:
previous_callback = None
if cbfun is None:
cbfun = 0 # depends on [control=['if'], data=['cbfun']]
c_cbfun = _GLFWmousebuttonfun(cbfun)
_mouse_button_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetMouseButtonCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] # depends on [control=['if'], data=[]] |
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text) | def function[find_elements_by_link_text, parameter[self, text]]:
constant[
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = driver.find_elements_by_link_text('Sign In')
]
return[call[name[self].find_elements, parameter[]]] | keyword[def] identifier[find_elements_by_link_text] ( identifier[self] , identifier[text] ):
literal[string]
keyword[return] identifier[self] . identifier[find_elements] ( identifier[by] = identifier[By] . identifier[LINK_TEXT] , identifier[value] = identifier[text] ) | def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
::
elements = driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text) |
def find_config(test_file=None, defaults=None, root=os.curdir):
"""
Find the path to the default config file.
We look at :root: for the :default: config file. If we can't find it
there we start looking at the parent directory recursively until we
find a file named :default: and return the absolute path to it.
If we can't find anything, we return None.
Args:
default: The name of the config file we look for.
root: The directory to start looking for.
Returns:
Path to the default config file, None if we can't find anything.
"""
if defaults is None:
defaults = [".benchbuild.yml", ".benchbuild.yaml"]
def walk_rec(cur_path, root):
cur_path = local.path(root) / test_file
if cur_path.exists():
return cur_path
new_root = local.path(root) / os.pardir
return walk_rec(cur_path, new_root) if new_root != root else None
if test_file is not None:
return walk_rec(test_file, root)
for test_file in defaults:
ret = walk_rec(test_file, root)
if ret is not None:
return ret | def function[find_config, parameter[test_file, defaults, root]]:
constant[
Find the path to the default config file.
We look at :root: for the :default: config file. If we can't find it
there we start looking at the parent directory recursively until we
find a file named :default: and return the absolute path to it.
If we can't find anything, we return None.
Args:
default: The name of the config file we look for.
root: The directory to start looking for.
Returns:
Path to the default config file, None if we can't find anything.
]
if compare[name[defaults] is constant[None]] begin[:]
variable[defaults] assign[=] list[[<ast.Constant object at 0x7da20c992a40>, <ast.Constant object at 0x7da20c9914e0>]]
def function[walk_rec, parameter[cur_path, root]]:
variable[cur_path] assign[=] binary_operation[call[name[local].path, parameter[name[root]]] / name[test_file]]
if call[name[cur_path].exists, parameter[]] begin[:]
return[name[cur_path]]
variable[new_root] assign[=] binary_operation[call[name[local].path, parameter[name[root]]] / name[os].pardir]
return[<ast.IfExp object at 0x7da1b2347a90>]
if compare[name[test_file] is_not constant[None]] begin[:]
return[call[name[walk_rec], parameter[name[test_file], name[root]]]]
for taget[name[test_file]] in starred[name[defaults]] begin[:]
variable[ret] assign[=] call[name[walk_rec], parameter[name[test_file], name[root]]]
if compare[name[ret] is_not constant[None]] begin[:]
return[name[ret]] | keyword[def] identifier[find_config] ( identifier[test_file] = keyword[None] , identifier[defaults] = keyword[None] , identifier[root] = identifier[os] . identifier[curdir] ):
literal[string]
keyword[if] identifier[defaults] keyword[is] keyword[None] :
identifier[defaults] =[ literal[string] , literal[string] ]
keyword[def] identifier[walk_rec] ( identifier[cur_path] , identifier[root] ):
identifier[cur_path] = identifier[local] . identifier[path] ( identifier[root] )/ identifier[test_file]
keyword[if] identifier[cur_path] . identifier[exists] ():
keyword[return] identifier[cur_path]
identifier[new_root] = identifier[local] . identifier[path] ( identifier[root] )/ identifier[os] . identifier[pardir]
keyword[return] identifier[walk_rec] ( identifier[cur_path] , identifier[new_root] ) keyword[if] identifier[new_root] != identifier[root] keyword[else] keyword[None]
keyword[if] identifier[test_file] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[walk_rec] ( identifier[test_file] , identifier[root] )
keyword[for] identifier[test_file] keyword[in] identifier[defaults] :
identifier[ret] = identifier[walk_rec] ( identifier[test_file] , identifier[root] )
keyword[if] identifier[ret] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[ret] | def find_config(test_file=None, defaults=None, root=os.curdir):
"""
Find the path to the default config file.
We look at :root: for the :default: config file. If we can't find it
there we start looking at the parent directory recursively until we
find a file named :default: and return the absolute path to it.
If we can't find anything, we return None.
Args:
default: The name of the config file we look for.
root: The directory to start looking for.
Returns:
Path to the default config file, None if we can't find anything.
"""
if defaults is None:
defaults = ['.benchbuild.yml', '.benchbuild.yaml'] # depends on [control=['if'], data=['defaults']]
def walk_rec(cur_path, root):
cur_path = local.path(root) / test_file
if cur_path.exists():
return cur_path # depends on [control=['if'], data=[]]
new_root = local.path(root) / os.pardir
return walk_rec(cur_path, new_root) if new_root != root else None
if test_file is not None:
return walk_rec(test_file, root) # depends on [control=['if'], data=['test_file']]
for test_file in defaults:
ret = walk_rec(test_file, root)
if ret is not None:
return ret # depends on [control=['if'], data=['ret']] # depends on [control=['for'], data=['test_file']] |
def cluster(list_of_texts, num_clusters=3):
"""
Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
"""
pipeline = Pipeline([
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clust", KMeans(n_clusters=num_clusters))
])
try:
clusters = pipeline.fit_predict(list_of_texts)
except ValueError:
clusters = list(range(len(list_of_texts)))
return clusters | def function[cluster, parameter[list_of_texts, num_clusters]]:
constant[
Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
]
variable[pipeline] assign[=] call[name[Pipeline], parameter[list[[<ast.Tuple object at 0x7da204960ac0>, <ast.Tuple object at 0x7da2049607f0>, <ast.Tuple object at 0x7da204961c30>]]]]
<ast.Try object at 0x7da204963070>
return[name[clusters]] | keyword[def] identifier[cluster] ( identifier[list_of_texts] , identifier[num_clusters] = literal[int] ):
literal[string]
identifier[pipeline] = identifier[Pipeline] ([
( literal[string] , identifier[CountVectorizer] ()),
( literal[string] , identifier[TfidfTransformer] ()),
( literal[string] , identifier[KMeans] ( identifier[n_clusters] = identifier[num_clusters] ))
])
keyword[try] :
identifier[clusters] = identifier[pipeline] . identifier[fit_predict] ( identifier[list_of_texts] )
keyword[except] identifier[ValueError] :
identifier[clusters] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[list_of_texts] )))
keyword[return] identifier[clusters] | def cluster(list_of_texts, num_clusters=3):
"""
Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
"""
pipeline = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clust', KMeans(n_clusters=num_clusters))])
try:
clusters = pipeline.fit_predict(list_of_texts) # depends on [control=['try'], data=[]]
except ValueError:
clusters = list(range(len(list_of_texts))) # depends on [control=['except'], data=[]]
return clusters |
def get_waiter(self, waiter_name):
"""Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
config = self._get_waiter_config()
if not config:
raise ValueError("Waiter does not exist: %s" % waiter_name)
model = waiter.WaiterModel(config)
mapping = {}
for name in model.waiter_names:
mapping[xform_name(name)] = name
if waiter_name not in mapping:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return waiter.create_waiter_with_client(
mapping[waiter_name], model, self, loop=self._loop) | def function[get_waiter, parameter[self, waiter_name]]:
constant[Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
]
variable[config] assign[=] call[name[self]._get_waiter_config, parameter[]]
if <ast.UnaryOp object at 0x7da20c6c5720> begin[:]
<ast.Raise object at 0x7da20c6c6260>
variable[model] assign[=] call[name[waiter].WaiterModel, parameter[name[config]]]
variable[mapping] assign[=] dictionary[[], []]
for taget[name[name]] in starred[name[model].waiter_names] begin[:]
call[name[mapping]][call[name[xform_name], parameter[name[name]]]] assign[=] name[name]
if compare[name[waiter_name] <ast.NotIn object at 0x7da2590d7190> name[mapping]] begin[:]
<ast.Raise object at 0x7da20c6c7520>
return[call[name[waiter].create_waiter_with_client, parameter[call[name[mapping]][name[waiter_name]], name[model], name[self]]]] | keyword[def] identifier[get_waiter] ( identifier[self] , identifier[waiter_name] ):
literal[string]
identifier[config] = identifier[self] . identifier[_get_waiter_config] ()
keyword[if] keyword[not] identifier[config] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[waiter_name] )
identifier[model] = identifier[waiter] . identifier[WaiterModel] ( identifier[config] )
identifier[mapping] ={}
keyword[for] identifier[name] keyword[in] identifier[model] . identifier[waiter_names] :
identifier[mapping] [ identifier[xform_name] ( identifier[name] )]= identifier[name]
keyword[if] identifier[waiter_name] keyword[not] keyword[in] identifier[mapping] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[waiter_name] )
keyword[return] identifier[waiter] . identifier[create_waiter_with_client] (
identifier[mapping] [ identifier[waiter_name] ], identifier[model] , identifier[self] , identifier[loop] = identifier[self] . identifier[_loop] ) | def get_waiter(self, waiter_name):
"""Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
config = self._get_waiter_config()
if not config:
raise ValueError('Waiter does not exist: %s' % waiter_name) # depends on [control=['if'], data=[]]
model = waiter.WaiterModel(config)
mapping = {}
for name in model.waiter_names:
mapping[xform_name(name)] = name # depends on [control=['for'], data=['name']]
if waiter_name not in mapping:
raise ValueError('Waiter does not exist: %s' % waiter_name) # depends on [control=['if'], data=['waiter_name']]
return waiter.create_waiter_with_client(mapping[waiter_name], model, self, loop=self._loop) |
def enqueue(self, stream_url, offset=0, opaque_token=None):
"""Adds stream to the queue. Does not impact the currently playing stream."""
directive = self._play_directive('ENQUEUE')
audio_item = self._audio_item(stream_url=stream_url,
offset=offset,
push_buffer=False,
opaque_token=opaque_token)
audio_item['stream']['expectedPreviousToken'] = current_stream.token
directive['audioItem'] = audio_item
self._response['directives'].append(directive)
return self | def function[enqueue, parameter[self, stream_url, offset, opaque_token]]:
constant[Adds stream to the queue. Does not impact the currently playing stream.]
variable[directive] assign[=] call[name[self]._play_directive, parameter[constant[ENQUEUE]]]
variable[audio_item] assign[=] call[name[self]._audio_item, parameter[]]
call[call[name[audio_item]][constant[stream]]][constant[expectedPreviousToken]] assign[=] name[current_stream].token
call[name[directive]][constant[audioItem]] assign[=] name[audio_item]
call[call[name[self]._response][constant[directives]].append, parameter[name[directive]]]
return[name[self]] | keyword[def] identifier[enqueue] ( identifier[self] , identifier[stream_url] , identifier[offset] = literal[int] , identifier[opaque_token] = keyword[None] ):
literal[string]
identifier[directive] = identifier[self] . identifier[_play_directive] ( literal[string] )
identifier[audio_item] = identifier[self] . identifier[_audio_item] ( identifier[stream_url] = identifier[stream_url] ,
identifier[offset] = identifier[offset] ,
identifier[push_buffer] = keyword[False] ,
identifier[opaque_token] = identifier[opaque_token] )
identifier[audio_item] [ literal[string] ][ literal[string] ]= identifier[current_stream] . identifier[token]
identifier[directive] [ literal[string] ]= identifier[audio_item]
identifier[self] . identifier[_response] [ literal[string] ]. identifier[append] ( identifier[directive] )
keyword[return] identifier[self] | def enqueue(self, stream_url, offset=0, opaque_token=None):
"""Adds stream to the queue. Does not impact the currently playing stream."""
directive = self._play_directive('ENQUEUE')
audio_item = self._audio_item(stream_url=stream_url, offset=offset, push_buffer=False, opaque_token=opaque_token)
audio_item['stream']['expectedPreviousToken'] = current_stream.token
directive['audioItem'] = audio_item
self._response['directives'].append(directive)
return self |
def print_report_on_all_logs() -> None:
"""
Use :func:`print` to report information on all logs.
"""
d = {}
# noinspection PyUnresolvedReferences
for name, obj in logging.Logger.manager.loggerDict.items():
d[name] = get_log_report(obj)
rootlogger = logging.getLogger()
d['(root logger)'] = get_log_report(rootlogger)
print(json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))) | def function[print_report_on_all_logs, parameter[]]:
constant[
Use :func:`print` to report information on all logs.
]
variable[d] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b189dc90>, <ast.Name object at 0x7da1b189ef20>]]] in starred[call[name[logging].Logger.manager.loggerDict.items, parameter[]]] begin[:]
call[name[d]][name[name]] assign[=] call[name[get_log_report], parameter[name[obj]]]
variable[rootlogger] assign[=] call[name[logging].getLogger, parameter[]]
call[name[d]][constant[(root logger)]] assign[=] call[name[get_log_report], parameter[name[rootlogger]]]
call[name[print], parameter[call[name[json].dumps, parameter[name[d]]]]] | keyword[def] identifier[print_report_on_all_logs] ()-> keyword[None] :
literal[string]
identifier[d] ={}
keyword[for] identifier[name] , identifier[obj] keyword[in] identifier[logging] . identifier[Logger] . identifier[manager] . identifier[loggerDict] . identifier[items] ():
identifier[d] [ identifier[name] ]= identifier[get_log_report] ( identifier[obj] )
identifier[rootlogger] = identifier[logging] . identifier[getLogger] ()
identifier[d] [ literal[string] ]= identifier[get_log_report] ( identifier[rootlogger] )
identifier[print] ( identifier[json] . identifier[dumps] ( identifier[d] , identifier[sort_keys] = keyword[True] , identifier[indent] = literal[int] , identifier[separators] =( literal[string] , literal[string] ))) | def print_report_on_all_logs() -> None:
"""
Use :func:`print` to report information on all logs.
"""
d = {}
# noinspection PyUnresolvedReferences
for (name, obj) in logging.Logger.manager.loggerDict.items():
d[name] = get_log_report(obj) # depends on [control=['for'], data=[]]
rootlogger = logging.getLogger()
d['(root logger)'] = get_log_report(rootlogger)
print(json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))) |
def power_off(self, interval=200):
"""230v power off"""
if self.__power_off_port is None:
cij.err("cij.usb.relay: Invalid USB_RELAY_POWER_OFF")
return 1
return self.__press(self.__power_off_port, interval=interval) | def function[power_off, parameter[self, interval]]:
constant[230v power off]
if compare[name[self].__power_off_port is constant[None]] begin[:]
call[name[cij].err, parameter[constant[cij.usb.relay: Invalid USB_RELAY_POWER_OFF]]]
return[constant[1]]
return[call[name[self].__press, parameter[name[self].__power_off_port]]] | keyword[def] identifier[power_off] ( identifier[self] , identifier[interval] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[__power_off_port] keyword[is] keyword[None] :
identifier[cij] . identifier[err] ( literal[string] )
keyword[return] literal[int]
keyword[return] identifier[self] . identifier[__press] ( identifier[self] . identifier[__power_off_port] , identifier[interval] = identifier[interval] ) | def power_off(self, interval=200):
"""230v power off"""
if self.__power_off_port is None:
cij.err('cij.usb.relay: Invalid USB_RELAY_POWER_OFF')
return 1 # depends on [control=['if'], data=[]]
return self.__press(self.__power_off_port, interval=interval) |
def _fromstring(value):
'''_fromstring
Convert XML string value to None, boolean, int or float.
'''
if not value:
return [None]
std_value = value.strip().lower()
if std_value == 'true':
return True
elif std_value == 'false':
return False
try:
return int(std_value)
except ValueError:
pass
try:
return float(std_value)
except ValueError:
pass
return value | def function[_fromstring, parameter[value]]:
constant[_fromstring
Convert XML string value to None, boolean, int or float.
]
if <ast.UnaryOp object at 0x7da1b25289a0> begin[:]
return[list[[<ast.Constant object at 0x7da1b2528460>]]]
variable[std_value] assign[=] call[call[name[value].strip, parameter[]].lower, parameter[]]
if compare[name[std_value] equal[==] constant[true]] begin[:]
return[constant[True]]
<ast.Try object at 0x7da1b252a260>
<ast.Try object at 0x7da1b252b070>
return[name[value]] | keyword[def] identifier[_fromstring] ( identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return] [ keyword[None] ]
identifier[std_value] = identifier[value] . identifier[strip] (). identifier[lower] ()
keyword[if] identifier[std_value] == literal[string] :
keyword[return] keyword[True]
keyword[elif] identifier[std_value] == literal[string] :
keyword[return] keyword[False]
keyword[try] :
keyword[return] identifier[int] ( identifier[std_value] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[float] ( identifier[std_value] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[value] | def _fromstring(value):
"""_fromstring
Convert XML string value to None, boolean, int or float.
"""
if not value:
return [None] # depends on [control=['if'], data=[]]
std_value = value.strip().lower()
if std_value == 'true':
return True # depends on [control=['if'], data=[]]
elif std_value == 'false':
return False # depends on [control=['if'], data=[]]
try:
return int(std_value) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
try:
return float(std_value) # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
return value |
def mask(x, indices):
"""
The same as x[indices], but return an empty array if indices are empty,
instead of returning all x elements,
and handles sparse "vectors".
"""
indices_shape = (
[len(indices)] if isinstance(indices, list) else indices.shape)
if not indices_shape[0]:
return np.array([])
elif is_sparse_vector(x) and len(indices_shape) == 1:
return x[0, indices].toarray()[0]
else:
return x[indices] | def function[mask, parameter[x, indices]]:
constant[
The same as x[indices], but return an empty array if indices are empty,
instead of returning all x elements,
and handles sparse "vectors".
]
variable[indices_shape] assign[=] <ast.IfExp object at 0x7da1b26ae230>
if <ast.UnaryOp object at 0x7da1b26afe50> begin[:]
return[call[name[np].array, parameter[list[[]]]]] | keyword[def] identifier[mask] ( identifier[x] , identifier[indices] ):
literal[string]
identifier[indices_shape] =(
[ identifier[len] ( identifier[indices] )] keyword[if] identifier[isinstance] ( identifier[indices] , identifier[list] ) keyword[else] identifier[indices] . identifier[shape] )
keyword[if] keyword[not] identifier[indices_shape] [ literal[int] ]:
keyword[return] identifier[np] . identifier[array] ([])
keyword[elif] identifier[is_sparse_vector] ( identifier[x] ) keyword[and] identifier[len] ( identifier[indices_shape] )== literal[int] :
keyword[return] identifier[x] [ literal[int] , identifier[indices] ]. identifier[toarray] ()[ literal[int] ]
keyword[else] :
keyword[return] identifier[x] [ identifier[indices] ] | def mask(x, indices):
"""
The same as x[indices], but return an empty array if indices are empty,
instead of returning all x elements,
and handles sparse "vectors".
"""
indices_shape = [len(indices)] if isinstance(indices, list) else indices.shape
if not indices_shape[0]:
return np.array([]) # depends on [control=['if'], data=[]]
elif is_sparse_vector(x) and len(indices_shape) == 1:
return x[0, indices].toarray()[0] # depends on [control=['if'], data=[]]
else:
return x[indices] |
def AddPathIfNotExists(env_dict, key, path, sep=os.pathsep):
"""This function will take 'key' out of the dictionary
'env_dict', then add the path 'path' to that key if it is not
already there. This treats the value of env_dict[key] as if it
has a similar format to the PATH variable...a list of paths
separated by tokens. The 'path' will get added to the list if it
is not already there."""
try:
is_list = 1
paths = env_dict[key]
if not is_List(env_dict[key]):
paths = paths.split(sep)
is_list = 0
if os.path.normcase(path) not in list(map(os.path.normcase, paths)):
paths = [ path ] + paths
if is_list:
env_dict[key] = paths
else:
env_dict[key] = sep.join(paths)
except KeyError:
env_dict[key] = path | def function[AddPathIfNotExists, parameter[env_dict, key, path, sep]]:
constant[This function will take 'key' out of the dictionary
'env_dict', then add the path 'path' to that key if it is not
already there. This treats the value of env_dict[key] as if it
has a similar format to the PATH variable...a list of paths
separated by tokens. The 'path' will get added to the list if it
is not already there.]
<ast.Try object at 0x7da18f8105b0> | keyword[def] identifier[AddPathIfNotExists] ( identifier[env_dict] , identifier[key] , identifier[path] , identifier[sep] = identifier[os] . identifier[pathsep] ):
literal[string]
keyword[try] :
identifier[is_list] = literal[int]
identifier[paths] = identifier[env_dict] [ identifier[key] ]
keyword[if] keyword[not] identifier[is_List] ( identifier[env_dict] [ identifier[key] ]):
identifier[paths] = identifier[paths] . identifier[split] ( identifier[sep] )
identifier[is_list] = literal[int]
keyword[if] identifier[os] . identifier[path] . identifier[normcase] ( identifier[path] ) keyword[not] keyword[in] identifier[list] ( identifier[map] ( identifier[os] . identifier[path] . identifier[normcase] , identifier[paths] )):
identifier[paths] =[ identifier[path] ]+ identifier[paths]
keyword[if] identifier[is_list] :
identifier[env_dict] [ identifier[key] ]= identifier[paths]
keyword[else] :
identifier[env_dict] [ identifier[key] ]= identifier[sep] . identifier[join] ( identifier[paths] )
keyword[except] identifier[KeyError] :
identifier[env_dict] [ identifier[key] ]= identifier[path] | def AddPathIfNotExists(env_dict, key, path, sep=os.pathsep):
"""This function will take 'key' out of the dictionary
'env_dict', then add the path 'path' to that key if it is not
already there. This treats the value of env_dict[key] as if it
has a similar format to the PATH variable...a list of paths
separated by tokens. The 'path' will get added to the list if it
is not already there."""
try:
is_list = 1
paths = env_dict[key]
if not is_List(env_dict[key]):
paths = paths.split(sep)
is_list = 0 # depends on [control=['if'], data=[]]
if os.path.normcase(path) not in list(map(os.path.normcase, paths)):
paths = [path] + paths # depends on [control=['if'], data=[]]
if is_list:
env_dict[key] = paths # depends on [control=['if'], data=[]]
else:
env_dict[key] = sep.join(paths) # depends on [control=['try'], data=[]]
except KeyError:
env_dict[key] = path # depends on [control=['except'], data=[]] |
def cam3_gen_source(ext, build_dir):
'''Add CAM3 fortran source if Fortran 90 compiler available,
if no compiler is found do not try to build the extension.'''
# Fortran 90 sources in order of compilation
fort90source = ['pmgrid.F90',
'prescribed_aerosols.F90',
'shr_kind_mod.F90',
'quicksort.F90',
'abortutils.F90',
'absems.F90',
'wv_saturation.F90',
'aer_optics.F90',
'cmparray_mod.F90',
'shr_const_mod.F90',
'physconst.F90',
'pkg_cldoptics.F90',
'gffgch.F90',
'chem_surfvals.F90',
'volcrad.F90',
'radae.F90',
'radlw.F90',
'radsw.F90',
'crm.F90',]
#thispath = abspath(config.local_path)
thispath = config.local_path
sourcelist = []
sourcelist.append(join(thispath,'_cam3.pyf'))
for item in fort90source:
sourcelist.append(join(thispath, 'src', item))
sourcelist.append(join(thispath,'Driver.f90'))
try:
config.have_f90c()
return sourcelist
except:
print('No Fortran 90 compiler found, not building CAM3 extension!')
return None | def function[cam3_gen_source, parameter[ext, build_dir]]:
constant[Add CAM3 fortran source if Fortran 90 compiler available,
if no compiler is found do not try to build the extension.]
variable[fort90source] assign[=] list[[<ast.Constant object at 0x7da1b13b46a0>, <ast.Constant object at 0x7da1b13b77c0>, <ast.Constant object at 0x7da1b13b7d60>, <ast.Constant object at 0x7da1b13b4df0>, <ast.Constant object at 0x7da1b13b71f0>, <ast.Constant object at 0x7da1b13b7100>, <ast.Constant object at 0x7da1b13b7dc0>, <ast.Constant object at 0x7da1b13b5510>, <ast.Constant object at 0x7da1b13b69b0>, <ast.Constant object at 0x7da1b13b7610>, <ast.Constant object at 0x7da1b13b5600>, <ast.Constant object at 0x7da1b13b7cd0>, <ast.Constant object at 0x7da1b13b4f40>, <ast.Constant object at 0x7da1b13b68c0>, <ast.Constant object at 0x7da1b13b5c00>, <ast.Constant object at 0x7da1b13b4160>, <ast.Constant object at 0x7da1b13b5990>, <ast.Constant object at 0x7da1b13b4d60>, <ast.Constant object at 0x7da1b13b4dc0>]]
variable[thispath] assign[=] name[config].local_path
variable[sourcelist] assign[=] list[[]]
call[name[sourcelist].append, parameter[call[name[join], parameter[name[thispath], constant[_cam3.pyf]]]]]
for taget[name[item]] in starred[name[fort90source]] begin[:]
call[name[sourcelist].append, parameter[call[name[join], parameter[name[thispath], constant[src], name[item]]]]]
call[name[sourcelist].append, parameter[call[name[join], parameter[name[thispath], constant[Driver.f90]]]]]
<ast.Try object at 0x7da1b13b4d90> | keyword[def] identifier[cam3_gen_source] ( identifier[ext] , identifier[build_dir] ):
literal[string]
identifier[fort90source] =[ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,]
identifier[thispath] = identifier[config] . identifier[local_path]
identifier[sourcelist] =[]
identifier[sourcelist] . identifier[append] ( identifier[join] ( identifier[thispath] , literal[string] ))
keyword[for] identifier[item] keyword[in] identifier[fort90source] :
identifier[sourcelist] . identifier[append] ( identifier[join] ( identifier[thispath] , literal[string] , identifier[item] ))
identifier[sourcelist] . identifier[append] ( identifier[join] ( identifier[thispath] , literal[string] ))
keyword[try] :
identifier[config] . identifier[have_f90c] ()
keyword[return] identifier[sourcelist]
keyword[except] :
identifier[print] ( literal[string] )
keyword[return] keyword[None] | def cam3_gen_source(ext, build_dir):
"""Add CAM3 fortran source if Fortran 90 compiler available,
if no compiler is found do not try to build the extension."""
# Fortran 90 sources in order of compilation
fort90source = ['pmgrid.F90', 'prescribed_aerosols.F90', 'shr_kind_mod.F90', 'quicksort.F90', 'abortutils.F90', 'absems.F90', 'wv_saturation.F90', 'aer_optics.F90', 'cmparray_mod.F90', 'shr_const_mod.F90', 'physconst.F90', 'pkg_cldoptics.F90', 'gffgch.F90', 'chem_surfvals.F90', 'volcrad.F90', 'radae.F90', 'radlw.F90', 'radsw.F90', 'crm.F90']
#thispath = abspath(config.local_path)
thispath = config.local_path
sourcelist = []
sourcelist.append(join(thispath, '_cam3.pyf'))
for item in fort90source:
sourcelist.append(join(thispath, 'src', item)) # depends on [control=['for'], data=['item']]
sourcelist.append(join(thispath, 'Driver.f90'))
try:
config.have_f90c()
return sourcelist # depends on [control=['try'], data=[]]
except:
print('No Fortran 90 compiler found, not building CAM3 extension!')
return None # depends on [control=['except'], data=[]] |
def asynchronous(func):
"""Return `func` in a "smart" asynchronous-aware wrapper.
If `func` is called within the event-loop — i.e. when it is running — this
returns the result of `func` without alteration. However, when called from
outside of the event-loop, and the result is awaitable, the result will be
passed though the current event-loop's `run_until_complete` method.
In other words, this automatically blocks when calling an asynchronous
function from outside of the event-loop, and so makes interactive use of
these APIs far more intuitive.
"""
@wraps(func)
def wrapper(*args, **kwargs):
eventloop = get_event_loop()
result = func(*args, **kwargs)
if not eventloop.is_running():
while isawaitable(result):
result = eventloop.run_until_complete(result)
return result
return wrapper | def function[asynchronous, parameter[func]]:
constant[Return `func` in a "smart" asynchronous-aware wrapper.
If `func` is called within the event-loop — i.e. when it is running — this
returns the result of `func` without alteration. However, when called from
outside of the event-loop, and the result is awaitable, the result will be
passed though the current event-loop's `run_until_complete` method.
In other words, this automatically blocks when calling an asynchronous
function from outside of the event-loop, and so makes interactive use of
these APIs far more intuitive.
]
def function[wrapper, parameter[]]:
variable[eventloop] assign[=] call[name[get_event_loop], parameter[]]
variable[result] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da20c76e230>]]
if <ast.UnaryOp object at 0x7da20c76e8c0> begin[:]
while call[name[isawaitable], parameter[name[result]]] begin[:]
variable[result] assign[=] call[name[eventloop].run_until_complete, parameter[name[result]]]
return[name[result]]
return[name[wrapper]] | keyword[def] identifier[asynchronous] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[eventloop] = identifier[get_event_loop] ()
identifier[result] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[eventloop] . identifier[is_running] ():
keyword[while] identifier[isawaitable] ( identifier[result] ):
identifier[result] = identifier[eventloop] . identifier[run_until_complete] ( identifier[result] )
keyword[return] identifier[result]
keyword[return] identifier[wrapper] | def asynchronous(func):
"""Return `func` in a "smart" asynchronous-aware wrapper.
If `func` is called within the event-loop — i.e. when it is running — this
returns the result of `func` without alteration. However, when called from
outside of the event-loop, and the result is awaitable, the result will be
passed though the current event-loop's `run_until_complete` method.
In other words, this automatically blocks when calling an asynchronous
function from outside of the event-loop, and so makes interactive use of
these APIs far more intuitive.
"""
@wraps(func)
def wrapper(*args, **kwargs):
eventloop = get_event_loop()
result = func(*args, **kwargs)
if not eventloop.is_running():
while isawaitable(result):
result = eventloop.run_until_complete(result) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
return result
return wrapper |
def _seq(self, locus, term, rank, accession):
"""
creates GFE from HLA sequence and locus
:param locus: string containing HLA locus.
:param sequence: string containing sequence data.
:return: GFEobject.
"""
try:
feature = self.api.get_feature_by_path(locus,
term,
rank,
accession)
return feature
except ApiException as e:
print("Exception when calling DefaultApi->get_feature_by_path: %s\n" % e)
return '' | def function[_seq, parameter[self, locus, term, rank, accession]]:
constant[
creates GFE from HLA sequence and locus
:param locus: string containing HLA locus.
:param sequence: string containing sequence data.
:return: GFEobject.
]
<ast.Try object at 0x7da1b25dad10> | keyword[def] identifier[_seq] ( identifier[self] , identifier[locus] , identifier[term] , identifier[rank] , identifier[accession] ):
literal[string]
keyword[try] :
identifier[feature] = identifier[self] . identifier[api] . identifier[get_feature_by_path] ( identifier[locus] ,
identifier[term] ,
identifier[rank] ,
identifier[accession] )
keyword[return] identifier[feature]
keyword[except] identifier[ApiException] keyword[as] identifier[e] :
identifier[print] ( literal[string] % identifier[e] )
keyword[return] literal[string] | def _seq(self, locus, term, rank, accession):
"""
creates GFE from HLA sequence and locus
:param locus: string containing HLA locus.
:param sequence: string containing sequence data.
:return: GFEobject.
"""
try:
feature = self.api.get_feature_by_path(locus, term, rank, accession)
return feature # depends on [control=['try'], data=[]]
except ApiException as e:
print('Exception when calling DefaultApi->get_feature_by_path: %s\n' % e)
return '' # depends on [control=['except'], data=['e']] |
def _read_arc_record(self):
"Reads out an arc record, formats it and returns it"
#XXX:Noufal Stream payload here rather than just read it
# r = self.fileobj.readline() # Drop the initial newline
# if r == "":
# return None
# header = self.fileobj.readline()
# Strip the initial new lines and read first line
header = self.fileobj.readline()
while header and header.strip() == "":
header = self.fileobj.readline()
if header == "":
return None
if int(self.version) == 1:
arc_header_re = ARC1_HEADER_RE
elif int(self.version) == 2:
arc_header_re = ARC2_HEADER_RE
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
payload = self.fileobj.read(int(headers['length']))
self.fileobj.readline() # Munge the separator newline.
return ARCRecord(header = arc_header, payload = payload) | def function[_read_arc_record, parameter[self]]:
constant[Reads out an arc record, formats it and returns it]
variable[header] assign[=] call[name[self].fileobj.readline, parameter[]]
while <ast.BoolOp object at 0x7da1b1a75a20> begin[:]
variable[header] assign[=] call[name[self].fileobj.readline, parameter[]]
if compare[name[header] equal[==] constant[]] begin[:]
return[constant[None]]
if compare[call[name[int], parameter[name[self].version]] equal[==] constant[1]] begin[:]
variable[arc_header_re] assign[=] name[ARC1_HEADER_RE]
variable[matches] assign[=] call[name[arc_header_re].search, parameter[name[header]]]
variable[headers] assign[=] call[name[matches].groupdict, parameter[]]
variable[arc_header] assign[=] call[name[ARCHeader], parameter[]]
variable[payload] assign[=] call[name[self].fileobj.read, parameter[call[name[int], parameter[call[name[headers]][constant[length]]]]]]
call[name[self].fileobj.readline, parameter[]]
return[call[name[ARCRecord], parameter[]]] | keyword[def] identifier[_read_arc_record] ( identifier[self] ):
literal[string]
identifier[header] = identifier[self] . identifier[fileobj] . identifier[readline] ()
keyword[while] identifier[header] keyword[and] identifier[header] . identifier[strip] ()== literal[string] :
identifier[header] = identifier[self] . identifier[fileobj] . identifier[readline] ()
keyword[if] identifier[header] == literal[string] :
keyword[return] keyword[None]
keyword[if] identifier[int] ( identifier[self] . identifier[version] )== literal[int] :
identifier[arc_header_re] = identifier[ARC1_HEADER_RE]
keyword[elif] identifier[int] ( identifier[self] . identifier[version] )== literal[int] :
identifier[arc_header_re] = identifier[ARC2_HEADER_RE]
identifier[matches] = identifier[arc_header_re] . identifier[search] ( identifier[header] )
identifier[headers] = identifier[matches] . identifier[groupdict] ()
identifier[arc_header] = identifier[ARCHeader] (** identifier[headers] )
identifier[payload] = identifier[self] . identifier[fileobj] . identifier[read] ( identifier[int] ( identifier[headers] [ literal[string] ]))
identifier[self] . identifier[fileobj] . identifier[readline] ()
keyword[return] identifier[ARCRecord] ( identifier[header] = identifier[arc_header] , identifier[payload] = identifier[payload] ) | def _read_arc_record(self):
"""Reads out an arc record, formats it and returns it"""
#XXX:Noufal Stream payload here rather than just read it
# r = self.fileobj.readline() # Drop the initial newline
# if r == "":
# return None
# header = self.fileobj.readline()
# Strip the initial new lines and read first line
header = self.fileobj.readline()
while header and header.strip() == '':
header = self.fileobj.readline() # depends on [control=['while'], data=[]]
if header == '':
return None # depends on [control=['if'], data=[]]
if int(self.version) == 1:
arc_header_re = ARC1_HEADER_RE # depends on [control=['if'], data=[]]
elif int(self.version) == 2:
arc_header_re = ARC2_HEADER_RE # depends on [control=['if'], data=[]]
matches = arc_header_re.search(header)
headers = matches.groupdict()
arc_header = ARCHeader(**headers)
payload = self.fileobj.read(int(headers['length']))
self.fileobj.readline() # Munge the separator newline.
return ARCRecord(header=arc_header, payload=payload) |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return ESP_TransformSuite(key)
if key not in ESP_TransformSuite._member_map_:
extend_enum(ESP_TransformSuite, key, default)
return ESP_TransformSuite[key] | def function[get, parameter[key, default]]:
constant[Backport support for original codes.]
if call[name[isinstance], parameter[name[key], name[int]]] begin[:]
return[call[name[ESP_TransformSuite], parameter[name[key]]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[ESP_TransformSuite]._member_map_] begin[:]
call[name[extend_enum], parameter[name[ESP_TransformSuite], name[key], name[default]]]
return[call[name[ESP_TransformSuite]][name[key]]] | keyword[def] identifier[get] ( identifier[key] , identifier[default] =- literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[key] , identifier[int] ):
keyword[return] identifier[ESP_TransformSuite] ( identifier[key] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[ESP_TransformSuite] . identifier[_member_map_] :
identifier[extend_enum] ( identifier[ESP_TransformSuite] , identifier[key] , identifier[default] )
keyword[return] identifier[ESP_TransformSuite] [ identifier[key] ] | def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return ESP_TransformSuite(key) # depends on [control=['if'], data=[]]
if key not in ESP_TransformSuite._member_map_:
extend_enum(ESP_TransformSuite, key, default) # depends on [control=['if'], data=['key']]
return ESP_TransformSuite[key] |
def lat_id(self, line):
''' Return the corresponding latitude
Args:
line (int): Line number
Returns:
Correponding latitude in degree
'''
if self.grid == 'WAC':
lat = ((1 + self.LINE_PROJECTION_OFFSET - line) *
self.MAP_SCALE * 1e-3 / self.A_AXIS_RADIUS)
return lat * 180 / np.pi
else:
lat = float(self.CENTER_LATITUDE) - \
(line - float(self.LINE_PROJECTION_OFFSET) - 1)\
/ float(self.MAP_RESOLUTION)
return lat | def function[lat_id, parameter[self, line]]:
constant[ Return the corresponding latitude
Args:
line (int): Line number
Returns:
Correponding latitude in degree
]
if compare[name[self].grid equal[==] constant[WAC]] begin[:]
variable[lat] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[1] + name[self].LINE_PROJECTION_OFFSET] - name[line]] * name[self].MAP_SCALE] * constant[0.001]] / name[self].A_AXIS_RADIUS]
return[binary_operation[binary_operation[name[lat] * constant[180]] / name[np].pi]] | keyword[def] identifier[lat_id] ( identifier[self] , identifier[line] ):
literal[string]
keyword[if] identifier[self] . identifier[grid] == literal[string] :
identifier[lat] =(( literal[int] + identifier[self] . identifier[LINE_PROJECTION_OFFSET] - identifier[line] )*
identifier[self] . identifier[MAP_SCALE] * literal[int] / identifier[self] . identifier[A_AXIS_RADIUS] )
keyword[return] identifier[lat] * literal[int] / identifier[np] . identifier[pi]
keyword[else] :
identifier[lat] = identifier[float] ( identifier[self] . identifier[CENTER_LATITUDE] )-( identifier[line] - identifier[float] ( identifier[self] . identifier[LINE_PROJECTION_OFFSET] )- literal[int] )/ identifier[float] ( identifier[self] . identifier[MAP_RESOLUTION] )
keyword[return] identifier[lat] | def lat_id(self, line):
""" Return the corresponding latitude
Args:
line (int): Line number
Returns:
Correponding latitude in degree
"""
if self.grid == 'WAC':
lat = (1 + self.LINE_PROJECTION_OFFSET - line) * self.MAP_SCALE * 0.001 / self.A_AXIS_RADIUS
return lat * 180 / np.pi # depends on [control=['if'], data=[]]
else:
lat = float(self.CENTER_LATITUDE) - (line - float(self.LINE_PROJECTION_OFFSET) - 1) / float(self.MAP_RESOLUTION)
return lat |
def lorenz_curve(y):
"""
Calculates the Lorenz Curve, a graphical representation of the distribution of income
or wealth.
It returns the cumulative share of people (x-axis) and the cumulative share of income earned
Parameters
----------
y : array_like(float or int, ndim=1)
Array of income/wealth for each individual. Unordered or ordered is fine.
Returns
-------
cum_people : array_like(float, ndim=1)
Cumulative share of people for each person index (i/n)
cum_income : array_like(float, ndim=1)
Cumulative share of income for each person index
References
----------
.. [1] https://en.wikipedia.org/wiki/Lorenz_curve
Examples
--------
>>> a_val, n = 3, 10_000
>>> y = np.random.pareto(a_val, size=n)
>>> f_vals, l_vals = lorenz(y)
"""
n = len(y)
y = np.sort(y)
s = np.zeros(n + 1)
s[1:] = np.cumsum(y)
cum_people = np.zeros(n + 1)
cum_income = np.zeros(n + 1)
for i in range(1, n + 1):
cum_people[i] = i / n
cum_income[i] = s[i] / s[n]
return cum_people, cum_income | def function[lorenz_curve, parameter[y]]:
constant[
Calculates the Lorenz Curve, a graphical representation of the distribution of income
or wealth.
It returns the cumulative share of people (x-axis) and the cumulative share of income earned
Parameters
----------
y : array_like(float or int, ndim=1)
Array of income/wealth for each individual. Unordered or ordered is fine.
Returns
-------
cum_people : array_like(float, ndim=1)
Cumulative share of people for each person index (i/n)
cum_income : array_like(float, ndim=1)
Cumulative share of income for each person index
References
----------
.. [1] https://en.wikipedia.org/wiki/Lorenz_curve
Examples
--------
>>> a_val, n = 3, 10_000
>>> y = np.random.pareto(a_val, size=n)
>>> f_vals, l_vals = lorenz(y)
]
variable[n] assign[=] call[name[len], parameter[name[y]]]
variable[y] assign[=] call[name[np].sort, parameter[name[y]]]
variable[s] assign[=] call[name[np].zeros, parameter[binary_operation[name[n] + constant[1]]]]
call[name[s]][<ast.Slice object at 0x7da20c6c7d00>] assign[=] call[name[np].cumsum, parameter[name[y]]]
variable[cum_people] assign[=] call[name[np].zeros, parameter[binary_operation[name[n] + constant[1]]]]
variable[cum_income] assign[=] call[name[np].zeros, parameter[binary_operation[name[n] + constant[1]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[name[n] + constant[1]]]]] begin[:]
call[name[cum_people]][name[i]] assign[=] binary_operation[name[i] / name[n]]
call[name[cum_income]][name[i]] assign[=] binary_operation[call[name[s]][name[i]] / call[name[s]][name[n]]]
return[tuple[[<ast.Name object at 0x7da204566650>, <ast.Name object at 0x7da204566ec0>]]] | keyword[def] identifier[lorenz_curve] ( identifier[y] ):
literal[string]
identifier[n] = identifier[len] ( identifier[y] )
identifier[y] = identifier[np] . identifier[sort] ( identifier[y] )
identifier[s] = identifier[np] . identifier[zeros] ( identifier[n] + literal[int] )
identifier[s] [ literal[int] :]= identifier[np] . identifier[cumsum] ( identifier[y] )
identifier[cum_people] = identifier[np] . identifier[zeros] ( identifier[n] + literal[int] )
identifier[cum_income] = identifier[np] . identifier[zeros] ( identifier[n] + literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] + literal[int] ):
identifier[cum_people] [ identifier[i] ]= identifier[i] / identifier[n]
identifier[cum_income] [ identifier[i] ]= identifier[s] [ identifier[i] ]/ identifier[s] [ identifier[n] ]
keyword[return] identifier[cum_people] , identifier[cum_income] | def lorenz_curve(y):
"""
Calculates the Lorenz Curve, a graphical representation of the distribution of income
or wealth.
It returns the cumulative share of people (x-axis) and the cumulative share of income earned
Parameters
----------
y : array_like(float or int, ndim=1)
Array of income/wealth for each individual. Unordered or ordered is fine.
Returns
-------
cum_people : array_like(float, ndim=1)
Cumulative share of people for each person index (i/n)
cum_income : array_like(float, ndim=1)
Cumulative share of income for each person index
References
----------
.. [1] https://en.wikipedia.org/wiki/Lorenz_curve
Examples
--------
>>> a_val, n = 3, 10_000
>>> y = np.random.pareto(a_val, size=n)
>>> f_vals, l_vals = lorenz(y)
"""
n = len(y)
y = np.sort(y)
s = np.zeros(n + 1)
s[1:] = np.cumsum(y)
cum_people = np.zeros(n + 1)
cum_income = np.zeros(n + 1)
for i in range(1, n + 1):
cum_people[i] = i / n
cum_income[i] = s[i] / s[n] # depends on [control=['for'], data=['i']]
return (cum_people, cum_income) |
def fromfile(file, dtype, count, *args, **kwargs):
"""Wrapper around np.fromfile to support any file-like object."""
try:
return numpy.fromfile(file, dtype=dtype, count=count, *args, **kwargs)
except (TypeError, IOError):
return numpy.frombuffer(file.read(count * numpy.dtype(dtype).itemsize),
dtype=dtype, count=count, *args, **kwargs) | def function[fromfile, parameter[file, dtype, count]]:
constant[Wrapper around np.fromfile to support any file-like object.]
<ast.Try object at 0x7da1b04cba30> | keyword[def] identifier[fromfile] ( identifier[file] , identifier[dtype] , identifier[count] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[numpy] . identifier[fromfile] ( identifier[file] , identifier[dtype] = identifier[dtype] , identifier[count] = identifier[count] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] ( identifier[TypeError] , identifier[IOError] ):
keyword[return] identifier[numpy] . identifier[frombuffer] ( identifier[file] . identifier[read] ( identifier[count] * identifier[numpy] . identifier[dtype] ( identifier[dtype] ). identifier[itemsize] ),
identifier[dtype] = identifier[dtype] , identifier[count] = identifier[count] ,* identifier[args] ,** identifier[kwargs] ) | def fromfile(file, dtype, count, *args, **kwargs):
"""Wrapper around np.fromfile to support any file-like object."""
try:
return numpy.fromfile(file, *args, dtype=dtype, count=count, **kwargs) # depends on [control=['try'], data=[]]
except (TypeError, IOError):
return numpy.frombuffer(file.read(count * numpy.dtype(dtype).itemsize), *args, dtype=dtype, count=count, **kwargs) # depends on [control=['except'], data=[]] |
def move(src, dst):
"""A re-implementation of shutil.move that properly removes the source
directory on windows when it has read-only files in it and the move is
between two drives.
This is almost identical to the real shutil.move, except it uses our rmtree
and skips handling non-windows OSes since the existing one works ok there.
"""
if os.name != 'nt':
return shutil.move(src, dst)
if os.path.isdir(dst):
if _absnorm(src) == _absnorm(dst):
os.rename(src, dst)
return
dst = os.path.join(dst, os.path.basename(src.rstrip('/\\')))
if os.path.exists(dst):
raise EnvironmentError("Path '{}' already exists".format(dst))
try:
os.rename(src, dst)
except OSError:
# probably different drives
if os.path.isdir(src):
if _absnorm(dst + '\\').startswith(_absnorm(src + '\\')):
# dst is inside src
raise EnvironmentError(
"Cannot move a directory '{}' into itself '{}'"
.format(src, dst)
)
shutil.copytree(src, dst, symlinks=True)
rmtree(src)
else:
shutil.copy2(src, dst)
os.unlink(src) | def function[move, parameter[src, dst]]:
constant[A re-implementation of shutil.move that properly removes the source
directory on windows when it has read-only files in it and the move is
between two drives.
This is almost identical to the real shutil.move, except it uses our rmtree
and skips handling non-windows OSes since the existing one works ok there.
]
if compare[name[os].name not_equal[!=] constant[nt]] begin[:]
return[call[name[shutil].move, parameter[name[src], name[dst]]]]
if call[name[os].path.isdir, parameter[name[dst]]] begin[:]
if compare[call[name[_absnorm], parameter[name[src]]] equal[==] call[name[_absnorm], parameter[name[dst]]]] begin[:]
call[name[os].rename, parameter[name[src], name[dst]]]
return[None]
variable[dst] assign[=] call[name[os].path.join, parameter[name[dst], call[name[os].path.basename, parameter[call[name[src].rstrip, parameter[constant[/\]]]]]]]
if call[name[os].path.exists, parameter[name[dst]]] begin[:]
<ast.Raise object at 0x7da1b1b3e740>
<ast.Try object at 0x7da1b1b3ce20> | keyword[def] identifier[move] ( identifier[src] , identifier[dst] ):
literal[string]
keyword[if] identifier[os] . identifier[name] != literal[string] :
keyword[return] identifier[shutil] . identifier[move] ( identifier[src] , identifier[dst] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[dst] ):
keyword[if] identifier[_absnorm] ( identifier[src] )== identifier[_absnorm] ( identifier[dst] ):
identifier[os] . identifier[rename] ( identifier[src] , identifier[dst] )
keyword[return]
identifier[dst] = identifier[os] . identifier[path] . identifier[join] ( identifier[dst] , identifier[os] . identifier[path] . identifier[basename] ( identifier[src] . identifier[rstrip] ( literal[string] )))
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[dst] ):
keyword[raise] identifier[EnvironmentError] ( literal[string] . identifier[format] ( identifier[dst] ))
keyword[try] :
identifier[os] . identifier[rename] ( identifier[src] , identifier[dst] )
keyword[except] identifier[OSError] :
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[src] ):
keyword[if] identifier[_absnorm] ( identifier[dst] + literal[string] ). identifier[startswith] ( identifier[_absnorm] ( identifier[src] + literal[string] )):
keyword[raise] identifier[EnvironmentError] (
literal[string]
. identifier[format] ( identifier[src] , identifier[dst] )
)
identifier[shutil] . identifier[copytree] ( identifier[src] , identifier[dst] , identifier[symlinks] = keyword[True] )
identifier[rmtree] ( identifier[src] )
keyword[else] :
identifier[shutil] . identifier[copy2] ( identifier[src] , identifier[dst] )
identifier[os] . identifier[unlink] ( identifier[src] ) | def move(src, dst):
"""A re-implementation of shutil.move that properly removes the source
directory on windows when it has read-only files in it and the move is
between two drives.
This is almost identical to the real shutil.move, except it uses our rmtree
and skips handling non-windows OSes since the existing one works ok there.
"""
if os.name != 'nt':
return shutil.move(src, dst) # depends on [control=['if'], data=[]]
if os.path.isdir(dst):
if _absnorm(src) == _absnorm(dst):
os.rename(src, dst)
return # depends on [control=['if'], data=[]]
dst = os.path.join(dst, os.path.basename(src.rstrip('/\\')))
if os.path.exists(dst):
raise EnvironmentError("Path '{}' already exists".format(dst)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
try:
os.rename(src, dst) # depends on [control=['try'], data=[]]
except OSError:
# probably different drives
if os.path.isdir(src):
if _absnorm(dst + '\\').startswith(_absnorm(src + '\\')):
# dst is inside src
raise EnvironmentError("Cannot move a directory '{}' into itself '{}'".format(src, dst)) # depends on [control=['if'], data=[]]
shutil.copytree(src, dst, symlinks=True)
rmtree(src) # depends on [control=['if'], data=[]]
else:
shutil.copy2(src, dst)
os.unlink(src) # depends on [control=['except'], data=[]] |
def get_msms_annotations(self, representatives_only=True, force_rerun=False):
"""Run MSMS on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-msms']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
for g in tqdm(self.genes):
g.protein.get_msms_annotations(representative_only=representatives_only, force_rerun=force_rerun) | def function[get_msms_annotations, parameter[self, representatives_only, force_rerun]]:
constant[Run MSMS on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-msms']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
]
for taget[name[g]] in starred[call[name[tqdm], parameter[name[self].genes]]] begin[:]
call[name[g].protein.get_msms_annotations, parameter[]] | keyword[def] identifier[get_msms_annotations] ( identifier[self] , identifier[representatives_only] = keyword[True] , identifier[force_rerun] = keyword[False] ):
literal[string]
keyword[for] identifier[g] keyword[in] identifier[tqdm] ( identifier[self] . identifier[genes] ):
identifier[g] . identifier[protein] . identifier[get_msms_annotations] ( identifier[representative_only] = identifier[representatives_only] , identifier[force_rerun] = identifier[force_rerun] ) | def get_msms_annotations(self, representatives_only=True, force_rerun=False):
"""Run MSMS on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-msms']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
for g in tqdm(self.genes):
g.protein.get_msms_annotations(representative_only=representatives_only, force_rerun=force_rerun) # depends on [control=['for'], data=['g']] |
def transform_assign_magic(line):
"""Handle the `a = %who` syntax."""
m = _assign_magic_re.match(line)
if m is not None:
cmd = m.group('cmd')
lhs = m.group('lhs')
new_line = '%s = get_ipython().magic(%r)' % (lhs, cmd)
return new_line
return line | def function[transform_assign_magic, parameter[line]]:
constant[Handle the `a = %who` syntax.]
variable[m] assign[=] call[name[_assign_magic_re].match, parameter[name[line]]]
if compare[name[m] is_not constant[None]] begin[:]
variable[cmd] assign[=] call[name[m].group, parameter[constant[cmd]]]
variable[lhs] assign[=] call[name[m].group, parameter[constant[lhs]]]
variable[new_line] assign[=] binary_operation[constant[%s = get_ipython().magic(%r)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b021ca00>, <ast.Name object at 0x7da1b021c880>]]]
return[name[new_line]]
return[name[line]] | keyword[def] identifier[transform_assign_magic] ( identifier[line] ):
literal[string]
identifier[m] = identifier[_assign_magic_re] . identifier[match] ( identifier[line] )
keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] :
identifier[cmd] = identifier[m] . identifier[group] ( literal[string] )
identifier[lhs] = identifier[m] . identifier[group] ( literal[string] )
identifier[new_line] = literal[string] %( identifier[lhs] , identifier[cmd] )
keyword[return] identifier[new_line]
keyword[return] identifier[line] | def transform_assign_magic(line):
"""Handle the `a = %who` syntax."""
m = _assign_magic_re.match(line)
if m is not None:
cmd = m.group('cmd')
lhs = m.group('lhs')
new_line = '%s = get_ipython().magic(%r)' % (lhs, cmd)
return new_line # depends on [control=['if'], data=['m']]
return line |
def ensure_queue_count(
self, queued, url, timeout, auth, acs_url, ssl_verify, tags=None, label_tags=None, group=None
):
"""
Ensure `marathon.queue.count` is reported as zero for apps without queued instances.
"""
metric_name = '{}.count'.format(self.QUEUE_PREFIX)
apps_response = self.get_apps_json(url, timeout, auth, acs_url, ssl_verify, tags, group)
for app in apps_response['apps']:
if app['id'] not in queued:
q_tags = self.get_app_tags(app, tags, label_tags)
self.gauge(metric_name, 0, tags=q_tags) | def function[ensure_queue_count, parameter[self, queued, url, timeout, auth, acs_url, ssl_verify, tags, label_tags, group]]:
constant[
Ensure `marathon.queue.count` is reported as zero for apps without queued instances.
]
variable[metric_name] assign[=] call[constant[{}.count].format, parameter[name[self].QUEUE_PREFIX]]
variable[apps_response] assign[=] call[name[self].get_apps_json, parameter[name[url], name[timeout], name[auth], name[acs_url], name[ssl_verify], name[tags], name[group]]]
for taget[name[app]] in starred[call[name[apps_response]][constant[apps]]] begin[:]
if compare[call[name[app]][constant[id]] <ast.NotIn object at 0x7da2590d7190> name[queued]] begin[:]
variable[q_tags] assign[=] call[name[self].get_app_tags, parameter[name[app], name[tags], name[label_tags]]]
call[name[self].gauge, parameter[name[metric_name], constant[0]]] | keyword[def] identifier[ensure_queue_count] (
identifier[self] , identifier[queued] , identifier[url] , identifier[timeout] , identifier[auth] , identifier[acs_url] , identifier[ssl_verify] , identifier[tags] = keyword[None] , identifier[label_tags] = keyword[None] , identifier[group] = keyword[None]
):
literal[string]
identifier[metric_name] = literal[string] . identifier[format] ( identifier[self] . identifier[QUEUE_PREFIX] )
identifier[apps_response] = identifier[self] . identifier[get_apps_json] ( identifier[url] , identifier[timeout] , identifier[auth] , identifier[acs_url] , identifier[ssl_verify] , identifier[tags] , identifier[group] )
keyword[for] identifier[app] keyword[in] identifier[apps_response] [ literal[string] ]:
keyword[if] identifier[app] [ literal[string] ] keyword[not] keyword[in] identifier[queued] :
identifier[q_tags] = identifier[self] . identifier[get_app_tags] ( identifier[app] , identifier[tags] , identifier[label_tags] )
identifier[self] . identifier[gauge] ( identifier[metric_name] , literal[int] , identifier[tags] = identifier[q_tags] ) | def ensure_queue_count(self, queued, url, timeout, auth, acs_url, ssl_verify, tags=None, label_tags=None, group=None):
"""
Ensure `marathon.queue.count` is reported as zero for apps without queued instances.
"""
metric_name = '{}.count'.format(self.QUEUE_PREFIX)
apps_response = self.get_apps_json(url, timeout, auth, acs_url, ssl_verify, tags, group)
for app in apps_response['apps']:
if app['id'] not in queued:
q_tags = self.get_app_tags(app, tags, label_tags)
self.gauge(metric_name, 0, tags=q_tags) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['app']] |
def export(request, count, name='', content_type=None):
"""
Export banners.
:Parameters:
- `count`: number of objects to pass into the template
- `name`: name of the template ( page/export/banner.html is default )
- `models`: list of Model classes to include
"""
t_list = []
if name:
t_list.append('page/export/%s.html' % name)
t_list.append('page/export/banner.html')
try:
cat = Category.objects.get_by_tree_path('')
except Category.DoesNotExist:
raise Http404()
listing = Listing.objects.get_listing(count=count, category=cat)
return render(
request,
t_list,
{ 'category' : cat, 'listing' : listing },
content_type=content_type
) | def function[export, parameter[request, count, name, content_type]]:
constant[
Export banners.
:Parameters:
- `count`: number of objects to pass into the template
- `name`: name of the template ( page/export/banner.html is default )
- `models`: list of Model classes to include
]
variable[t_list] assign[=] list[[]]
if name[name] begin[:]
call[name[t_list].append, parameter[binary_operation[constant[page/export/%s.html] <ast.Mod object at 0x7da2590d6920> name[name]]]]
call[name[t_list].append, parameter[constant[page/export/banner.html]]]
<ast.Try object at 0x7da18ede6410>
variable[listing] assign[=] call[name[Listing].objects.get_listing, parameter[]]
return[call[name[render], parameter[name[request], name[t_list], dictionary[[<ast.Constant object at 0x7da18ede5090>, <ast.Constant object at 0x7da18ede4190>], [<ast.Name object at 0x7da18ede4b80>, <ast.Name object at 0x7da18ede4ca0>]]]]] | keyword[def] identifier[export] ( identifier[request] , identifier[count] , identifier[name] = literal[string] , identifier[content_type] = keyword[None] ):
literal[string]
identifier[t_list] =[]
keyword[if] identifier[name] :
identifier[t_list] . identifier[append] ( literal[string] % identifier[name] )
identifier[t_list] . identifier[append] ( literal[string] )
keyword[try] :
identifier[cat] = identifier[Category] . identifier[objects] . identifier[get_by_tree_path] ( literal[string] )
keyword[except] identifier[Category] . identifier[DoesNotExist] :
keyword[raise] identifier[Http404] ()
identifier[listing] = identifier[Listing] . identifier[objects] . identifier[get_listing] ( identifier[count] = identifier[count] , identifier[category] = identifier[cat] )
keyword[return] identifier[render] (
identifier[request] ,
identifier[t_list] ,
{ literal[string] : identifier[cat] , literal[string] : identifier[listing] },
identifier[content_type] = identifier[content_type]
) | def export(request, count, name='', content_type=None):
"""
Export banners.
:Parameters:
- `count`: number of objects to pass into the template
- `name`: name of the template ( page/export/banner.html is default )
- `models`: list of Model classes to include
"""
t_list = []
if name:
t_list.append('page/export/%s.html' % name) # depends on [control=['if'], data=[]]
t_list.append('page/export/banner.html')
try:
cat = Category.objects.get_by_tree_path('') # depends on [control=['try'], data=[]]
except Category.DoesNotExist:
raise Http404() # depends on [control=['except'], data=[]]
listing = Listing.objects.get_listing(count=count, category=cat)
return render(request, t_list, {'category': cat, 'listing': listing}, content_type=content_type) |
def get_matrix(self, indices):
"""Retrieve Saltelli matrix."""
new = numpy.empty(self.samples1.shape)
for idx in range(len(indices)):
if indices[idx]:
new[idx] = self.samples1[idx]
else:
new[idx] = self.samples2[idx]
if self.poly:
new = self.poly(*new)
return new | def function[get_matrix, parameter[self, indices]]:
constant[Retrieve Saltelli matrix.]
variable[new] assign[=] call[name[numpy].empty, parameter[name[self].samples1.shape]]
for taget[name[idx]] in starred[call[name[range], parameter[call[name[len], parameter[name[indices]]]]]] begin[:]
if call[name[indices]][name[idx]] begin[:]
call[name[new]][name[idx]] assign[=] call[name[self].samples1][name[idx]]
if name[self].poly begin[:]
variable[new] assign[=] call[name[self].poly, parameter[<ast.Starred object at 0x7da18f09da80>]]
return[name[new]] | keyword[def] identifier[get_matrix] ( identifier[self] , identifier[indices] ):
literal[string]
identifier[new] = identifier[numpy] . identifier[empty] ( identifier[self] . identifier[samples1] . identifier[shape] )
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[len] ( identifier[indices] )):
keyword[if] identifier[indices] [ identifier[idx] ]:
identifier[new] [ identifier[idx] ]= identifier[self] . identifier[samples1] [ identifier[idx] ]
keyword[else] :
identifier[new] [ identifier[idx] ]= identifier[self] . identifier[samples2] [ identifier[idx] ]
keyword[if] identifier[self] . identifier[poly] :
identifier[new] = identifier[self] . identifier[poly] (* identifier[new] )
keyword[return] identifier[new] | def get_matrix(self, indices):
"""Retrieve Saltelli matrix."""
new = numpy.empty(self.samples1.shape)
for idx in range(len(indices)):
if indices[idx]:
new[idx] = self.samples1[idx] # depends on [control=['if'], data=[]]
else:
new[idx] = self.samples2[idx] # depends on [control=['for'], data=['idx']]
if self.poly:
new = self.poly(*new) # depends on [control=['if'], data=[]]
return new |
def dispose_at_exit(exitable):
'''
register `exitable.__exit__()` into `atexit` module.
return the `exitable` itself.
'''
@atexit.register
def callback():
exitable.__exit__(*sys.exc_info())
return exitable | def function[dispose_at_exit, parameter[exitable]]:
constant[
register `exitable.__exit__()` into `atexit` module.
return the `exitable` itself.
]
def function[callback, parameter[]]:
call[name[exitable].__exit__, parameter[<ast.Starred object at 0x7da18fe90cd0>]]
return[name[exitable]] | keyword[def] identifier[dispose_at_exit] ( identifier[exitable] ):
literal[string]
@ identifier[atexit] . identifier[register]
keyword[def] identifier[callback] ():
identifier[exitable] . identifier[__exit__] (* identifier[sys] . identifier[exc_info] ())
keyword[return] identifier[exitable] | def dispose_at_exit(exitable):
"""
register `exitable.__exit__()` into `atexit` module.
return the `exitable` itself.
"""
@atexit.register
def callback():
exitable.__exit__(*sys.exc_info())
return exitable |
def postorder(self):
"""Return the nodes in the binary tree using post-order_ traversal.
A post-order_ traversal visits left subtree, right subtree, then root.
.. _post-order: https://en.wikipedia.org/wiki/Tree_traversal
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.postorder
[Node(4), Node(5), Node(2), Node(3), Node(1)]
"""
node_stack = []
result = []
node = self
while True:
while node is not None:
if node.right is not None:
node_stack.append(node.right)
node_stack.append(node)
node = node.left
node = node_stack.pop()
if (node.right is not None and
len(node_stack) > 0 and
node_stack[-1] is node.right):
node_stack.pop()
node_stack.append(node)
node = node.right
else:
result.append(node)
node = None
if len(node_stack) == 0:
break
return result | def function[postorder, parameter[self]]:
constant[Return the nodes in the binary tree using post-order_ traversal.
A post-order_ traversal visits left subtree, right subtree, then root.
.. _post-order: https://en.wikipedia.org/wiki/Tree_traversal
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \
2 3
/ \
4 5
<BLANKLINE>
>>> root.postorder
[Node(4), Node(5), Node(2), Node(3), Node(1)]
]
variable[node_stack] assign[=] list[[]]
variable[result] assign[=] list[[]]
variable[node] assign[=] name[self]
while constant[True] begin[:]
while compare[name[node] is_not constant[None]] begin[:]
if compare[name[node].right is_not constant[None]] begin[:]
call[name[node_stack].append, parameter[name[node].right]]
call[name[node_stack].append, parameter[name[node]]]
variable[node] assign[=] name[node].left
variable[node] assign[=] call[name[node_stack].pop, parameter[]]
if <ast.BoolOp object at 0x7da20c6c7cd0> begin[:]
call[name[node_stack].pop, parameter[]]
call[name[node_stack].append, parameter[name[node]]]
variable[node] assign[=] name[node].right
if compare[call[name[len], parameter[name[node_stack]]] equal[==] constant[0]] begin[:]
break
return[name[result]] | keyword[def] identifier[postorder] ( identifier[self] ):
literal[string]
identifier[node_stack] =[]
identifier[result] =[]
identifier[node] = identifier[self]
keyword[while] keyword[True] :
keyword[while] identifier[node] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[node] . identifier[right] keyword[is] keyword[not] keyword[None] :
identifier[node_stack] . identifier[append] ( identifier[node] . identifier[right] )
identifier[node_stack] . identifier[append] ( identifier[node] )
identifier[node] = identifier[node] . identifier[left]
identifier[node] = identifier[node_stack] . identifier[pop] ()
keyword[if] ( identifier[node] . identifier[right] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[len] ( identifier[node_stack] )> literal[int] keyword[and]
identifier[node_stack] [- literal[int] ] keyword[is] identifier[node] . identifier[right] ):
identifier[node_stack] . identifier[pop] ()
identifier[node_stack] . identifier[append] ( identifier[node] )
identifier[node] = identifier[node] . identifier[right]
keyword[else] :
identifier[result] . identifier[append] ( identifier[node] )
identifier[node] = keyword[None]
keyword[if] identifier[len] ( identifier[node_stack] )== literal[int] :
keyword[break]
keyword[return] identifier[result] | def postorder(self):
"""Return the nodes in the binary tree using post-order_ traversal.
A post-order_ traversal visits left subtree, right subtree, then root.
.. _post-order: https://en.wikipedia.org/wiki/Tree_traversal
:return: List of nodes.
:rtype: [binarytree.Node]
**Example**:
.. doctest::
>>> from binarytree import Node
>>>
>>> root = Node(1)
>>> root.left = Node(2)
>>> root.right = Node(3)
>>> root.left.left = Node(4)
>>> root.left.right = Node(5)
>>>
>>> print(root)
<BLANKLINE>
__1
/ \\
2 3
/ \\
4 5
<BLANKLINE>
>>> root.postorder
[Node(4), Node(5), Node(2), Node(3), Node(1)]
"""
node_stack = []
result = []
node = self
while True:
while node is not None:
if node.right is not None:
node_stack.append(node.right) # depends on [control=['if'], data=[]]
node_stack.append(node)
node = node.left # depends on [control=['while'], data=['node']]
node = node_stack.pop()
if node.right is not None and len(node_stack) > 0 and (node_stack[-1] is node.right):
node_stack.pop()
node_stack.append(node)
node = node.right # depends on [control=['if'], data=[]]
else:
result.append(node)
node = None
if len(node_stack) == 0:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return result |
def target_heating_level(self):
"""Return target heating level."""
try:
if self.side == 'left':
level = self.device.device_data['leftTargetHeatingLevel']
elif self.side == 'right':
level = self.device.device_data['rightTargetHeatingLevel']
return level
except TypeError:
return None | def function[target_heating_level, parameter[self]]:
constant[Return target heating level.]
<ast.Try object at 0x7da18bc728f0> | keyword[def] identifier[target_heating_level] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[side] == literal[string] :
identifier[level] = identifier[self] . identifier[device] . identifier[device_data] [ literal[string] ]
keyword[elif] identifier[self] . identifier[side] == literal[string] :
identifier[level] = identifier[self] . identifier[device] . identifier[device_data] [ literal[string] ]
keyword[return] identifier[level]
keyword[except] identifier[TypeError] :
keyword[return] keyword[None] | def target_heating_level(self):
"""Return target heating level."""
try:
if self.side == 'left':
level = self.device.device_data['leftTargetHeatingLevel'] # depends on [control=['if'], data=[]]
elif self.side == 'right':
level = self.device.device_data['rightTargetHeatingLevel'] # depends on [control=['if'], data=[]]
return level # depends on [control=['try'], data=[]]
except TypeError:
return None # depends on [control=['except'], data=[]] |
def createCellsGrid (self):
''' Create population cells based on fixed number of cells'''
from .. import sim
cells = []
rangeLocs = [[0, getattr(sim.net.params, 'size'+coord)] for coord in ['X','Y','Z']]
for icoord, coord in enumerate(['x', 'y', 'z']):
# constrain to range set by user
if coord+'normRange' in self.tags: # if normalized range, convert to normalized
self.tags[coord+'Range'] = [float(point) * getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
if coord+'Range' in self.tags: # if user provided absolute range, calculate range
self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
rangeLocs[icoord] = [self.tags[coord+'Range'][0], self.tags[coord+'Range'][1]]
gridSpacing = self.tags['gridSpacing']
gridLocs = []
for x in np.arange(rangeLocs[0][0], rangeLocs[0][1]+1, gridSpacing):
for y in np.arange(rangeLocs[1][0], rangeLocs[1][1]+1, gridSpacing):
for z in np.arange(rangeLocs[2][0], rangeLocs[2][1]+1, gridSpacing):
gridLocs.append((x, y, z))
numCells = len(gridLocs)
for i in self._distributeCells(numCells)[sim.rank]:
gid = sim.net.lastGid+i
self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
cellTags['pop'] = self.tags['pop']
cellTags['xnorm'] = gridLocs[i][0] / sim.net.params.sizeX # set x location (um)
cellTags['ynorm'] = gridLocs[i][1] / sim.net.params.sizeY # set y location (um)
cellTags['znorm'] = gridLocs[i][2] / sim.net.params.sizeZ # set z location (um)
cellTags['x'] = gridLocs[i][0] # set x location (um)
cellTags['y'] = gridLocs[i][1] # set y location (um)
cellTags['z'] = gridLocs[i][2] # set z location (um)
cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, numCells, gid, self.tags['pop'], sim.rank)))
sim.net.lastGid = sim.net.lastGid + numCells
return cells | def function[createCellsGrid, parameter[self]]:
constant[ Create population cells based on fixed number of cells]
from relative_module[None] import module[sim]
variable[cells] assign[=] list[[]]
variable[rangeLocs] assign[=] <ast.ListComp object at 0x7da2044c0f40>
for taget[tuple[[<ast.Name object at 0x7da2044c1b10>, <ast.Name object at 0x7da2044c17e0>]]] in starred[call[name[enumerate], parameter[list[[<ast.Constant object at 0x7da2044c2800>, <ast.Constant object at 0x7da2044c1fc0>, <ast.Constant object at 0x7da2044c0340>]]]]] begin[:]
if compare[binary_operation[name[coord] + constant[normRange]] in name[self].tags] begin[:]
call[name[self].tags][binary_operation[name[coord] + constant[Range]]] assign[=] <ast.ListComp object at 0x7da2044c0040>
if compare[binary_operation[name[coord] + constant[Range]] in name[self].tags] begin[:]
call[name[self].tags][binary_operation[name[coord] + constant[normRange]]] assign[=] <ast.ListComp object at 0x7da18fe92770>
call[name[rangeLocs]][name[icoord]] assign[=] list[[<ast.Subscript object at 0x7da18fe93370>, <ast.Subscript object at 0x7da18fe903a0>]]
variable[gridSpacing] assign[=] call[name[self].tags][constant[gridSpacing]]
variable[gridLocs] assign[=] list[[]]
for taget[name[x]] in starred[call[name[np].arange, parameter[call[call[name[rangeLocs]][constant[0]]][constant[0]], binary_operation[call[call[name[rangeLocs]][constant[0]]][constant[1]] + constant[1]], name[gridSpacing]]]] begin[:]
for taget[name[y]] in starred[call[name[np].arange, parameter[call[call[name[rangeLocs]][constant[1]]][constant[0]], binary_operation[call[call[name[rangeLocs]][constant[1]]][constant[1]] + constant[1]], name[gridSpacing]]]] begin[:]
for taget[name[z]] in starred[call[name[np].arange, parameter[call[call[name[rangeLocs]][constant[2]]][constant[0]], binary_operation[call[call[name[rangeLocs]][constant[2]]][constant[1]] + constant[1]], name[gridSpacing]]]] begin[:]
call[name[gridLocs].append, parameter[tuple[[<ast.Name object at 0x7da18f720070>, <ast.Name object at 0x7da18f722d40>, <ast.Name object at 0x7da18f721ea0>]]]]
variable[numCells] assign[=] call[name[len], parameter[name[gridLocs]]]
for taget[name[i]] in starred[call[call[name[self]._distributeCells, parameter[name[numCells]]]][name[sim].rank]] begin[:]
variable[gid] assign[=] binary_operation[name[sim].net.lastGid + name[i]]
call[name[self].cellGids.append, parameter[name[gid]]]
variable[cellTags] assign[=] <ast.DictComp object at 0x7da1b2344880>
call[name[cellTags]][constant[pop]] assign[=] call[name[self].tags][constant[pop]]
call[name[cellTags]][constant[xnorm]] assign[=] binary_operation[call[call[name[gridLocs]][name[i]]][constant[0]] / name[sim].net.params.sizeX]
call[name[cellTags]][constant[ynorm]] assign[=] binary_operation[call[call[name[gridLocs]][name[i]]][constant[1]] / name[sim].net.params.sizeY]
call[name[cellTags]][constant[znorm]] assign[=] binary_operation[call[call[name[gridLocs]][name[i]]][constant[2]] / name[sim].net.params.sizeZ]
call[name[cellTags]][constant[x]] assign[=] call[call[name[gridLocs]][name[i]]][constant[0]]
call[name[cellTags]][constant[y]] assign[=] call[call[name[gridLocs]][name[i]]][constant[1]]
call[name[cellTags]][constant[z]] assign[=] call[call[name[gridLocs]][name[i]]][constant[2]]
call[name[cells].append, parameter[call[name[self].cellModelClass, parameter[name[gid], name[cellTags]]]]]
if name[sim].cfg.verbose begin[:]
call[name[print], parameter[binary_operation[constant[Cell %d/%d (gid=%d) of pop %s, on node %d, ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2345cf0>, <ast.Name object at 0x7da1b2347700>, <ast.Name object at 0x7da1b2344fa0>, <ast.Subscript object at 0x7da1b23456c0>, <ast.Attribute object at 0x7da1b2347a60>]]]]]
name[sim].net.lastGid assign[=] binary_operation[name[sim].net.lastGid + name[numCells]]
return[name[cells]] | keyword[def] identifier[createCellsGrid] ( identifier[self] ):
literal[string]
keyword[from] .. keyword[import] identifier[sim]
identifier[cells] =[]
identifier[rangeLocs] =[[ literal[int] , identifier[getattr] ( identifier[sim] . identifier[net] . identifier[params] , literal[string] + identifier[coord] )] keyword[for] identifier[coord] keyword[in] [ literal[string] , literal[string] , literal[string] ]]
keyword[for] identifier[icoord] , identifier[coord] keyword[in] identifier[enumerate] ([ literal[string] , literal[string] , literal[string] ]):
keyword[if] identifier[coord] + literal[string] keyword[in] identifier[self] . identifier[tags] :
identifier[self] . identifier[tags] [ identifier[coord] + literal[string] ]=[ identifier[float] ( identifier[point] )* identifier[getattr] ( identifier[sim] . identifier[net] . identifier[params] , literal[string] + identifier[coord] . identifier[upper] ()) keyword[for] identifier[point] keyword[in] identifier[self] . identifier[tags] [ identifier[coord] + literal[string] ]]
keyword[if] identifier[coord] + literal[string] keyword[in] identifier[self] . identifier[tags] :
identifier[self] . identifier[tags] [ identifier[coord] + literal[string] ]=[ identifier[float] ( identifier[point] )/ identifier[getattr] ( identifier[sim] . identifier[net] . identifier[params] , literal[string] + identifier[coord] . identifier[upper] ()) keyword[for] identifier[point] keyword[in] identifier[self] . identifier[tags] [ identifier[coord] + literal[string] ]]
identifier[rangeLocs] [ identifier[icoord] ]=[ identifier[self] . identifier[tags] [ identifier[coord] + literal[string] ][ literal[int] ], identifier[self] . identifier[tags] [ identifier[coord] + literal[string] ][ literal[int] ]]
identifier[gridSpacing] = identifier[self] . identifier[tags] [ literal[string] ]
identifier[gridLocs] =[]
keyword[for] identifier[x] keyword[in] identifier[np] . identifier[arange] ( identifier[rangeLocs] [ literal[int] ][ literal[int] ], identifier[rangeLocs] [ literal[int] ][ literal[int] ]+ literal[int] , identifier[gridSpacing] ):
keyword[for] identifier[y] keyword[in] identifier[np] . identifier[arange] ( identifier[rangeLocs] [ literal[int] ][ literal[int] ], identifier[rangeLocs] [ literal[int] ][ literal[int] ]+ literal[int] , identifier[gridSpacing] ):
keyword[for] identifier[z] keyword[in] identifier[np] . identifier[arange] ( identifier[rangeLocs] [ literal[int] ][ literal[int] ], identifier[rangeLocs] [ literal[int] ][ literal[int] ]+ literal[int] , identifier[gridSpacing] ):
identifier[gridLocs] . identifier[append] (( identifier[x] , identifier[y] , identifier[z] ))
identifier[numCells] = identifier[len] ( identifier[gridLocs] )
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_distributeCells] ( identifier[numCells] )[ identifier[sim] . identifier[rank] ]:
identifier[gid] = identifier[sim] . identifier[net] . identifier[lastGid] + identifier[i]
identifier[self] . identifier[cellGids] . identifier[append] ( identifier[gid] )
identifier[cellTags] ={ identifier[k] : identifier[v] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[self] . identifier[tags] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[sim] . identifier[net] . identifier[params] . identifier[popTagsCopiedToCells] }
identifier[cellTags] [ literal[string] ]= identifier[self] . identifier[tags] [ literal[string] ]
identifier[cellTags] [ literal[string] ]= identifier[gridLocs] [ identifier[i] ][ literal[int] ]/ identifier[sim] . identifier[net] . identifier[params] . identifier[sizeX]
identifier[cellTags] [ literal[string] ]= identifier[gridLocs] [ identifier[i] ][ literal[int] ]/ identifier[sim] . identifier[net] . identifier[params] . identifier[sizeY]
identifier[cellTags] [ literal[string] ]= identifier[gridLocs] [ identifier[i] ][ literal[int] ]/ identifier[sim] . identifier[net] . identifier[params] . identifier[sizeZ]
identifier[cellTags] [ literal[string] ]= identifier[gridLocs] [ identifier[i] ][ literal[int] ]
identifier[cellTags] [ literal[string] ]= identifier[gridLocs] [ identifier[i] ][ literal[int] ]
identifier[cellTags] [ literal[string] ]= identifier[gridLocs] [ identifier[i] ][ literal[int] ]
identifier[cells] . identifier[append] ( identifier[self] . identifier[cellModelClass] ( identifier[gid] , identifier[cellTags] ))
keyword[if] identifier[sim] . identifier[cfg] . identifier[verbose] : identifier[print] (( literal[string] %( identifier[i] , identifier[numCells] , identifier[gid] , identifier[self] . identifier[tags] [ literal[string] ], identifier[sim] . identifier[rank] )))
identifier[sim] . identifier[net] . identifier[lastGid] = identifier[sim] . identifier[net] . identifier[lastGid] + identifier[numCells]
keyword[return] identifier[cells] | def createCellsGrid(self):
""" Create population cells based on fixed number of cells"""
from .. import sim
cells = []
rangeLocs = [[0, getattr(sim.net.params, 'size' + coord)] for coord in ['X', 'Y', 'Z']]
for (icoord, coord) in enumerate(['x', 'y', 'z']):
# constrain to range set by user
if coord + 'normRange' in self.tags: # if normalized range, convert to normalized
self.tags[coord + 'Range'] = [float(point) * getattr(sim.net.params, 'size' + coord.upper()) for point in self.tags[coord + 'Range']] # depends on [control=['if'], data=[]]
if coord + 'Range' in self.tags: # if user provided absolute range, calculate range
self.tags[coord + 'normRange'] = [float(point) / getattr(sim.net.params, 'size' + coord.upper()) for point in self.tags[coord + 'Range']]
rangeLocs[icoord] = [self.tags[coord + 'Range'][0], self.tags[coord + 'Range'][1]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
gridSpacing = self.tags['gridSpacing']
gridLocs = []
for x in np.arange(rangeLocs[0][0], rangeLocs[0][1] + 1, gridSpacing):
for y in np.arange(rangeLocs[1][0], rangeLocs[1][1] + 1, gridSpacing):
for z in np.arange(rangeLocs[2][0], rangeLocs[2][1] + 1, gridSpacing):
gridLocs.append((x, y, z)) # depends on [control=['for'], data=['z']] # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=['x']]
numCells = len(gridLocs)
for i in self._distributeCells(numCells)[sim.rank]:
gid = sim.net.lastGid + i
self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
cellTags['pop'] = self.tags['pop']
cellTags['xnorm'] = gridLocs[i][0] / sim.net.params.sizeX # set x location (um)
cellTags['ynorm'] = gridLocs[i][1] / sim.net.params.sizeY # set y location (um)
cellTags['znorm'] = gridLocs[i][2] / sim.net.params.sizeZ # set z location (um)
cellTags['x'] = gridLocs[i][0] # set x location (um)
cellTags['y'] = gridLocs[i][1] # set y location (um)
cellTags['z'] = gridLocs[i][2] # set z location (um)
cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
if sim.cfg.verbose:
print('Cell %d/%d (gid=%d) of pop %s, on node %d, ' % (i, numCells, gid, self.tags['pop'], sim.rank)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
sim.net.lastGid = sim.net.lastGid + numCells
return cells |
def make_lda_variational(activation, num_topics, layer_sizes):
"""Creates the variational distribution for LDA.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
lda_variational: A function that takes a bag-of-words Tensor as
input and returns a distribution over topics.
"""
encoder_net = tf.keras.Sequential()
for num_hidden_units in layer_sizes:
encoder_net.add(
tf.keras.layers.Dense(
num_hidden_units,
activation=activation,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
encoder_net.add(
tf.keras.layers.Dense(
num_topics,
activation=tf.nn.softplus,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
def lda_variational(bag_of_words):
concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))
return ed.Dirichlet(concentration=concentration, name="topics_posterior")
return lda_variational | def function[make_lda_variational, parameter[activation, num_topics, layer_sizes]]:
constant[Creates the variational distribution for LDA.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
lda_variational: A function that takes a bag-of-words Tensor as
input and returns a distribution over topics.
]
variable[encoder_net] assign[=] call[name[tf].keras.Sequential, parameter[]]
for taget[name[num_hidden_units]] in starred[name[layer_sizes]] begin[:]
call[name[encoder_net].add, parameter[call[name[tf].keras.layers.Dense, parameter[name[num_hidden_units]]]]]
call[name[encoder_net].add, parameter[call[name[tf].keras.layers.Dense, parameter[name[num_topics]]]]]
def function[lda_variational, parameter[bag_of_words]]:
variable[concentration] assign[=] call[name[_clip_dirichlet_parameters], parameter[call[name[encoder_net], parameter[name[bag_of_words]]]]]
return[call[name[ed].Dirichlet, parameter[]]]
return[name[lda_variational]] | keyword[def] identifier[make_lda_variational] ( identifier[activation] , identifier[num_topics] , identifier[layer_sizes] ):
literal[string]
identifier[encoder_net] = identifier[tf] . identifier[keras] . identifier[Sequential] ()
keyword[for] identifier[num_hidden_units] keyword[in] identifier[layer_sizes] :
identifier[encoder_net] . identifier[add] (
identifier[tf] . identifier[keras] . identifier[layers] . identifier[Dense] (
identifier[num_hidden_units] ,
identifier[activation] = identifier[activation] ,
identifier[kernel_initializer] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[glorot_normal_initializer] ()))
identifier[encoder_net] . identifier[add] (
identifier[tf] . identifier[keras] . identifier[layers] . identifier[Dense] (
identifier[num_topics] ,
identifier[activation] = identifier[tf] . identifier[nn] . identifier[softplus] ,
identifier[kernel_initializer] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[glorot_normal_initializer] ()))
keyword[def] identifier[lda_variational] ( identifier[bag_of_words] ):
identifier[concentration] = identifier[_clip_dirichlet_parameters] ( identifier[encoder_net] ( identifier[bag_of_words] ))
keyword[return] identifier[ed] . identifier[Dirichlet] ( identifier[concentration] = identifier[concentration] , identifier[name] = literal[string] )
keyword[return] identifier[lda_variational] | def make_lda_variational(activation, num_topics, layer_sizes):
"""Creates the variational distribution for LDA.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
lda_variational: A function that takes a bag-of-words Tensor as
input and returns a distribution over topics.
"""
encoder_net = tf.keras.Sequential()
for num_hidden_units in layer_sizes:
encoder_net.add(tf.keras.layers.Dense(num_hidden_units, activation=activation, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) # depends on [control=['for'], data=['num_hidden_units']]
encoder_net.add(tf.keras.layers.Dense(num_topics, activation=tf.nn.softplus, kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
def lda_variational(bag_of_words):
concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))
return ed.Dirichlet(concentration=concentration, name='topics_posterior')
return lda_variational |
def bot(app, debug):
"""Run the experiment bot."""
if debug is None:
verify_id(None, None, app)
(id, tmp) = setup_experiment(log)
if debug:
url = debug
else:
heroku_app = HerokuApp(dallinger_uid=app)
worker = generate_random_id()
hit = generate_random_id()
assignment = generate_random_id()
ad_url = "{}/ad".format(heroku_app.url)
ad_parameters = "assignmentId={}&hitId={}&workerId={}&mode=sandbox"
ad_parameters = ad_parameters.format(assignment, hit, worker)
url = "{}?{}".format(ad_url, ad_parameters)
bot = bot_factory(url)
bot.run_experiment() | def function[bot, parameter[app, debug]]:
constant[Run the experiment bot.]
if compare[name[debug] is constant[None]] begin[:]
call[name[verify_id], parameter[constant[None], constant[None], name[app]]]
<ast.Tuple object at 0x7da1b0382cb0> assign[=] call[name[setup_experiment], parameter[name[log]]]
if name[debug] begin[:]
variable[url] assign[=] name[debug]
variable[bot] assign[=] call[name[bot_factory], parameter[name[url]]]
call[name[bot].run_experiment, parameter[]] | keyword[def] identifier[bot] ( identifier[app] , identifier[debug] ):
literal[string]
keyword[if] identifier[debug] keyword[is] keyword[None] :
identifier[verify_id] ( keyword[None] , keyword[None] , identifier[app] )
( identifier[id] , identifier[tmp] )= identifier[setup_experiment] ( identifier[log] )
keyword[if] identifier[debug] :
identifier[url] = identifier[debug]
keyword[else] :
identifier[heroku_app] = identifier[HerokuApp] ( identifier[dallinger_uid] = identifier[app] )
identifier[worker] = identifier[generate_random_id] ()
identifier[hit] = identifier[generate_random_id] ()
identifier[assignment] = identifier[generate_random_id] ()
identifier[ad_url] = literal[string] . identifier[format] ( identifier[heroku_app] . identifier[url] )
identifier[ad_parameters] = literal[string]
identifier[ad_parameters] = identifier[ad_parameters] . identifier[format] ( identifier[assignment] , identifier[hit] , identifier[worker] )
identifier[url] = literal[string] . identifier[format] ( identifier[ad_url] , identifier[ad_parameters] )
identifier[bot] = identifier[bot_factory] ( identifier[url] )
identifier[bot] . identifier[run_experiment] () | def bot(app, debug):
"""Run the experiment bot."""
if debug is None:
verify_id(None, None, app) # depends on [control=['if'], data=[]]
(id, tmp) = setup_experiment(log)
if debug:
url = debug # depends on [control=['if'], data=[]]
else:
heroku_app = HerokuApp(dallinger_uid=app)
worker = generate_random_id()
hit = generate_random_id()
assignment = generate_random_id()
ad_url = '{}/ad'.format(heroku_app.url)
ad_parameters = 'assignmentId={}&hitId={}&workerId={}&mode=sandbox'
ad_parameters = ad_parameters.format(assignment, hit, worker)
url = '{}?{}'.format(ad_url, ad_parameters)
bot = bot_factory(url)
bot.run_experiment() |
def to_xml(self):
"""Convert to XML message."""
element = etree.Element(self._tag_name)
struct_to_xml(element, [
{"author": self.handle},
{"target_guid": self.target_guid},
{"target_type": DiasporaRetraction.entity_type_to_remote(self.entity_type)},
])
return element | def function[to_xml, parameter[self]]:
constant[Convert to XML message.]
variable[element] assign[=] call[name[etree].Element, parameter[name[self]._tag_name]]
call[name[struct_to_xml], parameter[name[element], list[[<ast.Dict object at 0x7da1b05353f0>, <ast.Dict object at 0x7da1b0536cb0>, <ast.Dict object at 0x7da1b0535660>]]]]
return[name[element]] | keyword[def] identifier[to_xml] ( identifier[self] ):
literal[string]
identifier[element] = identifier[etree] . identifier[Element] ( identifier[self] . identifier[_tag_name] )
identifier[struct_to_xml] ( identifier[element] ,[
{ literal[string] : identifier[self] . identifier[handle] },
{ literal[string] : identifier[self] . identifier[target_guid] },
{ literal[string] : identifier[DiasporaRetraction] . identifier[entity_type_to_remote] ( identifier[self] . identifier[entity_type] )},
])
keyword[return] identifier[element] | def to_xml(self):
"""Convert to XML message."""
element = etree.Element(self._tag_name)
struct_to_xml(element, [{'author': self.handle}, {'target_guid': self.target_guid}, {'target_type': DiasporaRetraction.entity_type_to_remote(self.entity_type)}])
return element |
def expand(self, short):
"""Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
"""
data = dict(action='expand', shorturl=short)
jsondata = self._api_request(params=data)
return jsondata['longurl'] | def function[expand, parameter[self, short]]:
constant[Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
]
variable[data] assign[=] call[name[dict], parameter[]]
variable[jsondata] assign[=] call[name[self]._api_request, parameter[]]
return[call[name[jsondata]][constant[longurl]]] | keyword[def] identifier[expand] ( identifier[self] , identifier[short] ):
literal[string]
identifier[data] = identifier[dict] ( identifier[action] = literal[string] , identifier[shorturl] = identifier[short] )
identifier[jsondata] = identifier[self] . identifier[_api_request] ( identifier[params] = identifier[data] )
keyword[return] identifier[jsondata] [ literal[string] ] | def expand(self, short):
"""Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
"""
data = dict(action='expand', shorturl=short)
jsondata = self._api_request(params=data)
return jsondata['longurl'] |
def sort_set(s):
"""Return a sorted list of the contents of a set
This is intended to be used to iterate over world state, where you just need keys
to be in some deterministic order, but the sort order should be obvious from the key.
Non-strings come before strings and then tuples. Tuples compare element-wise as normal.
But ultimately all comparisons are between values' ``repr``.
This is memoized.
"""
if not isinstance(s, Set):
raise TypeError("sets only")
s = frozenset(s)
if s not in _sort_set_memo:
_sort_set_memo[s] = sorted(s, key=_sort_set_key)
return _sort_set_memo[s] | def function[sort_set, parameter[s]]:
constant[Return a sorted list of the contents of a set
This is intended to be used to iterate over world state, where you just need keys
to be in some deterministic order, but the sort order should be obvious from the key.
Non-strings come before strings and then tuples. Tuples compare element-wise as normal.
But ultimately all comparisons are between values' ``repr``.
This is memoized.
]
if <ast.UnaryOp object at 0x7da1b0e24f70> begin[:]
<ast.Raise object at 0x7da1b0c53760>
variable[s] assign[=] call[name[frozenset], parameter[name[s]]]
if compare[name[s] <ast.NotIn object at 0x7da2590d7190> name[_sort_set_memo]] begin[:]
call[name[_sort_set_memo]][name[s]] assign[=] call[name[sorted], parameter[name[s]]]
return[call[name[_sort_set_memo]][name[s]]] | keyword[def] identifier[sort_set] ( identifier[s] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[s] , identifier[Set] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[s] = identifier[frozenset] ( identifier[s] )
keyword[if] identifier[s] keyword[not] keyword[in] identifier[_sort_set_memo] :
identifier[_sort_set_memo] [ identifier[s] ]= identifier[sorted] ( identifier[s] , identifier[key] = identifier[_sort_set_key] )
keyword[return] identifier[_sort_set_memo] [ identifier[s] ] | def sort_set(s):
"""Return a sorted list of the contents of a set
This is intended to be used to iterate over world state, where you just need keys
to be in some deterministic order, but the sort order should be obvious from the key.
Non-strings come before strings and then tuples. Tuples compare element-wise as normal.
But ultimately all comparisons are between values' ``repr``.
This is memoized.
"""
if not isinstance(s, Set):
raise TypeError('sets only') # depends on [control=['if'], data=[]]
s = frozenset(s)
if s not in _sort_set_memo:
_sort_set_memo[s] = sorted(s, key=_sort_set_key) # depends on [control=['if'], data=['s', '_sort_set_memo']]
return _sort_set_memo[s] |
def tree_walk(cls, directory, tree):
"""Walks a tree returned by `cls.list_to_tree` returning a list of
3-tuples as if from os.walk()."""
results = []
dirs = [d for d in tree if d != FILE_MARKER]
files = tree[FILE_MARKER]
results.append((directory, dirs, files))
for d in dirs:
subdir = os.path.join(directory, d)
subtree = tree[d]
results.extend(cls.tree_walk(subdir, subtree))
return results | def function[tree_walk, parameter[cls, directory, tree]]:
constant[Walks a tree returned by `cls.list_to_tree` returning a list of
3-tuples as if from os.walk().]
variable[results] assign[=] list[[]]
variable[dirs] assign[=] <ast.ListComp object at 0x7da1b0a40610>
variable[files] assign[=] call[name[tree]][name[FILE_MARKER]]
call[name[results].append, parameter[tuple[[<ast.Name object at 0x7da1b0a82bf0>, <ast.Name object at 0x7da1b0a832e0>, <ast.Name object at 0x7da1b0a80340>]]]]
for taget[name[d]] in starred[name[dirs]] begin[:]
variable[subdir] assign[=] call[name[os].path.join, parameter[name[directory], name[d]]]
variable[subtree] assign[=] call[name[tree]][name[d]]
call[name[results].extend, parameter[call[name[cls].tree_walk, parameter[name[subdir], name[subtree]]]]]
return[name[results]] | keyword[def] identifier[tree_walk] ( identifier[cls] , identifier[directory] , identifier[tree] ):
literal[string]
identifier[results] =[]
identifier[dirs] =[ identifier[d] keyword[for] identifier[d] keyword[in] identifier[tree] keyword[if] identifier[d] != identifier[FILE_MARKER] ]
identifier[files] = identifier[tree] [ identifier[FILE_MARKER] ]
identifier[results] . identifier[append] (( identifier[directory] , identifier[dirs] , identifier[files] ))
keyword[for] identifier[d] keyword[in] identifier[dirs] :
identifier[subdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[d] )
identifier[subtree] = identifier[tree] [ identifier[d] ]
identifier[results] . identifier[extend] ( identifier[cls] . identifier[tree_walk] ( identifier[subdir] , identifier[subtree] ))
keyword[return] identifier[results] | def tree_walk(cls, directory, tree):
"""Walks a tree returned by `cls.list_to_tree` returning a list of
3-tuples as if from os.walk()."""
results = []
dirs = [d for d in tree if d != FILE_MARKER]
files = tree[FILE_MARKER]
results.append((directory, dirs, files))
for d in dirs:
subdir = os.path.join(directory, d)
subtree = tree[d]
results.extend(cls.tree_walk(subdir, subtree)) # depends on [control=['for'], data=['d']]
return results |
def run(self):
"""
Run the consumer.
"""
self.start()
timeout = self._stop_flag_timeout
health_check_ts = time.time()
while True:
try:
self.stop_flag.wait(timeout=timeout)
except KeyboardInterrupt:
self._logger.info('Received SIGINT')
self.stop(graceful=True)
except:
self._logger.exception('Error in consumer.')
self.stop()
else:
if self._received_signal:
self.stop(graceful=self._graceful)
if self.stop_flag.is_set():
break
if self._health_check:
now = time.time()
if now >= health_check_ts + self._health_check_interval:
health_check_ts = now
self.check_worker_health()
if self._restart:
self._logger.info('Consumer will restart.')
python = sys.executable
os.execl(python, python, *sys.argv)
else:
self._logger.info('Consumer exiting.') | def function[run, parameter[self]]:
constant[
Run the consumer.
]
call[name[self].start, parameter[]]
variable[timeout] assign[=] name[self]._stop_flag_timeout
variable[health_check_ts] assign[=] call[name[time].time, parameter[]]
while constant[True] begin[:]
<ast.Try object at 0x7da20e957f70>
if call[name[self].stop_flag.is_set, parameter[]] begin[:]
break
if name[self]._health_check begin[:]
variable[now] assign[=] call[name[time].time, parameter[]]
if compare[name[now] greater_or_equal[>=] binary_operation[name[health_check_ts] + name[self]._health_check_interval]] begin[:]
variable[health_check_ts] assign[=] name[now]
call[name[self].check_worker_health, parameter[]]
if name[self]._restart begin[:]
call[name[self]._logger.info, parameter[constant[Consumer will restart.]]]
variable[python] assign[=] name[sys].executable
call[name[os].execl, parameter[name[python], name[python], <ast.Starred object at 0x7da20e956590>]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[start] ()
identifier[timeout] = identifier[self] . identifier[_stop_flag_timeout]
identifier[health_check_ts] = identifier[time] . identifier[time] ()
keyword[while] keyword[True] :
keyword[try] :
identifier[self] . identifier[stop_flag] . identifier[wait] ( identifier[timeout] = identifier[timeout] )
keyword[except] identifier[KeyboardInterrupt] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[stop] ( identifier[graceful] = keyword[True] )
keyword[except] :
identifier[self] . identifier[_logger] . identifier[exception] ( literal[string] )
identifier[self] . identifier[stop] ()
keyword[else] :
keyword[if] identifier[self] . identifier[_received_signal] :
identifier[self] . identifier[stop] ( identifier[graceful] = identifier[self] . identifier[_graceful] )
keyword[if] identifier[self] . identifier[stop_flag] . identifier[is_set] ():
keyword[break]
keyword[if] identifier[self] . identifier[_health_check] :
identifier[now] = identifier[time] . identifier[time] ()
keyword[if] identifier[now] >= identifier[health_check_ts] + identifier[self] . identifier[_health_check_interval] :
identifier[health_check_ts] = identifier[now]
identifier[self] . identifier[check_worker_health] ()
keyword[if] identifier[self] . identifier[_restart] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[python] = identifier[sys] . identifier[executable]
identifier[os] . identifier[execl] ( identifier[python] , identifier[python] ,* identifier[sys] . identifier[argv] )
keyword[else] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] ) | def run(self):
"""
Run the consumer.
"""
self.start()
timeout = self._stop_flag_timeout
health_check_ts = time.time()
while True:
try:
self.stop_flag.wait(timeout=timeout) # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
self._logger.info('Received SIGINT')
self.stop(graceful=True) # depends on [control=['except'], data=[]]
except:
self._logger.exception('Error in consumer.')
self.stop() # depends on [control=['except'], data=[]]
else:
if self._received_signal:
self.stop(graceful=self._graceful) # depends on [control=['if'], data=[]]
if self.stop_flag.is_set():
break # depends on [control=['if'], data=[]]
if self._health_check:
now = time.time()
if now >= health_check_ts + self._health_check_interval:
health_check_ts = now
self.check_worker_health() # depends on [control=['if'], data=['now']] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if self._restart:
self._logger.info('Consumer will restart.')
python = sys.executable
os.execl(python, python, *sys.argv) # depends on [control=['if'], data=[]]
else:
self._logger.info('Consumer exiting.') |
def save(filters='*.*', text='Save THIS, facehead!', default_directory='default_directory', force_extension=None):
"""
Pops up a save dialog and returns the string path of the selected file.
Parameters
----------
filters='*.*'
Which file types should appear in the dialog.
text='Save THIS, facehead!'
Title text for the dialog.
default_directory='default_directory'
Key for the spinmob.settings default directory. If you use a name, e.g.
'my_defaultypoo', for one call of this function, the next time you use
the same name, it will start in the last dialog's directory by default.
force_extension=None
Setting this to a string, e.g. 'txt', will enforce that the filename
will have this extension.
"""
# make sure the filters contains "*.*" as an option!
if not '*' in filters.split(';'): filters = filters + ";;All files (*)"
# if this type of pref doesn't exist, we need to make a new one
if default_directory in _settings.keys(): default = _settings[default_directory]
else: default = ""
# pop up the dialog
result = _qtw.QFileDialog.getSaveFileName(None,text,default,filters)
# If Qt5, take the zeroth element
if _s._qt.VERSION_INFO[0:5] == "PyQt5": result = result[0]
# Make sure it's a string
result = str(result)
# Enforce the extension if necessary
if not force_extension == None:
# In case the user put "*.txt" instead of just "txt"
force_extension = force_extension.replace('*','').replace('.','')
# If it doesn't end with the right extension, add this.
if not _os.path.splitext(result)[-1][1:] == force_extension:
result = result + '.' + force_extension
if result == '': return None
else:
_settings[default_directory] = _os.path.split(result)[0]
return result | def function[save, parameter[filters, text, default_directory, force_extension]]:
constant[
Pops up a save dialog and returns the string path of the selected file.
Parameters
----------
filters='*.*'
Which file types should appear in the dialog.
text='Save THIS, facehead!'
Title text for the dialog.
default_directory='default_directory'
Key for the spinmob.settings default directory. If you use a name, e.g.
'my_defaultypoo', for one call of this function, the next time you use
the same name, it will start in the last dialog's directory by default.
force_extension=None
Setting this to a string, e.g. 'txt', will enforce that the filename
will have this extension.
]
if <ast.UnaryOp object at 0x7da2041d9690> begin[:]
variable[filters] assign[=] binary_operation[name[filters] + constant[;;All files (*)]]
if compare[name[default_directory] in call[name[_settings].keys, parameter[]]] begin[:]
variable[default] assign[=] call[name[_settings]][name[default_directory]]
variable[result] assign[=] call[name[_qtw].QFileDialog.getSaveFileName, parameter[constant[None], name[text], name[default], name[filters]]]
if compare[call[name[_s]._qt.VERSION_INFO][<ast.Slice object at 0x7da2041dac20>] equal[==] constant[PyQt5]] begin[:]
variable[result] assign[=] call[name[result]][constant[0]]
variable[result] assign[=] call[name[str], parameter[name[result]]]
if <ast.UnaryOp object at 0x7da2041d8ee0> begin[:]
variable[force_extension] assign[=] call[call[name[force_extension].replace, parameter[constant[*], constant[]]].replace, parameter[constant[.], constant[]]]
if <ast.UnaryOp object at 0x7da2041daa40> begin[:]
variable[result] assign[=] binary_operation[binary_operation[name[result] + constant[.]] + name[force_extension]]
if compare[name[result] equal[==] constant[]] begin[:]
return[constant[None]] | keyword[def] identifier[save] ( identifier[filters] = literal[string] , identifier[text] = literal[string] , identifier[default_directory] = literal[string] , identifier[force_extension] = keyword[None] ):
literal[string]
keyword[if] keyword[not] literal[string] keyword[in] identifier[filters] . identifier[split] ( literal[string] ): identifier[filters] = identifier[filters] + literal[string]
keyword[if] identifier[default_directory] keyword[in] identifier[_settings] . identifier[keys] (): identifier[default] = identifier[_settings] [ identifier[default_directory] ]
keyword[else] : identifier[default] = literal[string]
identifier[result] = identifier[_qtw] . identifier[QFileDialog] . identifier[getSaveFileName] ( keyword[None] , identifier[text] , identifier[default] , identifier[filters] )
keyword[if] identifier[_s] . identifier[_qt] . identifier[VERSION_INFO] [ literal[int] : literal[int] ]== literal[string] : identifier[result] = identifier[result] [ literal[int] ]
identifier[result] = identifier[str] ( identifier[result] )
keyword[if] keyword[not] identifier[force_extension] == keyword[None] :
identifier[force_extension] = identifier[force_extension] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[_os] . identifier[path] . identifier[splitext] ( identifier[result] )[- literal[int] ][ literal[int] :]== identifier[force_extension] :
identifier[result] = identifier[result] + literal[string] + identifier[force_extension]
keyword[if] identifier[result] == literal[string] : keyword[return] keyword[None]
keyword[else] :
identifier[_settings] [ identifier[default_directory] ]= identifier[_os] . identifier[path] . identifier[split] ( identifier[result] )[ literal[int] ]
keyword[return] identifier[result] | def save(filters='*.*', text='Save THIS, facehead!', default_directory='default_directory', force_extension=None):
"""
Pops up a save dialog and returns the string path of the selected file.
Parameters
----------
filters='*.*'
Which file types should appear in the dialog.
text='Save THIS, facehead!'
Title text for the dialog.
default_directory='default_directory'
Key for the spinmob.settings default directory. If you use a name, e.g.
'my_defaultypoo', for one call of this function, the next time you use
the same name, it will start in the last dialog's directory by default.
force_extension=None
Setting this to a string, e.g. 'txt', will enforce that the filename
will have this extension.
"""
# make sure the filters contains "*.*" as an option!
if not '*' in filters.split(';'):
filters = filters + ';;All files (*)' # depends on [control=['if'], data=[]]
# if this type of pref doesn't exist, we need to make a new one
if default_directory in _settings.keys():
default = _settings[default_directory] # depends on [control=['if'], data=['default_directory']]
else:
default = ''
# pop up the dialog
result = _qtw.QFileDialog.getSaveFileName(None, text, default, filters)
# If Qt5, take the zeroth element
if _s._qt.VERSION_INFO[0:5] == 'PyQt5':
result = result[0] # depends on [control=['if'], data=[]]
# Make sure it's a string
result = str(result)
# Enforce the extension if necessary
if not force_extension == None:
# In case the user put "*.txt" instead of just "txt"
force_extension = force_extension.replace('*', '').replace('.', '')
# If it doesn't end with the right extension, add this.
if not _os.path.splitext(result)[-1][1:] == force_extension:
result = result + '.' + force_extension # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if result == '':
return None # depends on [control=['if'], data=[]]
else:
_settings[default_directory] = _os.path.split(result)[0]
return result |
def _adjust_parent_stack(fsh, prev, parents):
"""
Adjust the parent stack in-place as the trigger level changes.
Parameters
----------
fsh : :class:`FoldScopeHelper`
The :class:`FoldScopeHelper` object to act on.
prev : :class:`FoldScopeHelper`
The previous :class:`FoldScopeHelper` object.
parents : list of :class:`FoldScopeHelper`
The current list of parent objects.
Returns
-------
None
"""
if prev is None:
return
if fsh.fold_scope.trigger_level < prev.fold_scope.trigger_level:
diff = prev.fold_scope.trigger_level - fsh.fold_scope.trigger_level
del parents[-diff:]
elif fsh.fold_scope.trigger_level > prev.fold_scope.trigger_level:
parents.append(prev)
elif fsh.fold_scope.trigger_level == prev.fold_scope.trigger_level:
pass | def function[_adjust_parent_stack, parameter[fsh, prev, parents]]:
constant[
Adjust the parent stack in-place as the trigger level changes.
Parameters
----------
fsh : :class:`FoldScopeHelper`
The :class:`FoldScopeHelper` object to act on.
prev : :class:`FoldScopeHelper`
The previous :class:`FoldScopeHelper` object.
parents : list of :class:`FoldScopeHelper`
The current list of parent objects.
Returns
-------
None
]
if compare[name[prev] is constant[None]] begin[:]
return[None]
if compare[name[fsh].fold_scope.trigger_level less[<] name[prev].fold_scope.trigger_level] begin[:]
variable[diff] assign[=] binary_operation[name[prev].fold_scope.trigger_level - name[fsh].fold_scope.trigger_level]
<ast.Delete object at 0x7da18bccaa40> | keyword[def] identifier[_adjust_parent_stack] ( identifier[fsh] , identifier[prev] , identifier[parents] ):
literal[string]
keyword[if] identifier[prev] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[fsh] . identifier[fold_scope] . identifier[trigger_level] < identifier[prev] . identifier[fold_scope] . identifier[trigger_level] :
identifier[diff] = identifier[prev] . identifier[fold_scope] . identifier[trigger_level] - identifier[fsh] . identifier[fold_scope] . identifier[trigger_level]
keyword[del] identifier[parents] [- identifier[diff] :]
keyword[elif] identifier[fsh] . identifier[fold_scope] . identifier[trigger_level] > identifier[prev] . identifier[fold_scope] . identifier[trigger_level] :
identifier[parents] . identifier[append] ( identifier[prev] )
keyword[elif] identifier[fsh] . identifier[fold_scope] . identifier[trigger_level] == identifier[prev] . identifier[fold_scope] . identifier[trigger_level] :
keyword[pass] | def _adjust_parent_stack(fsh, prev, parents):
"""
Adjust the parent stack in-place as the trigger level changes.
Parameters
----------
fsh : :class:`FoldScopeHelper`
The :class:`FoldScopeHelper` object to act on.
prev : :class:`FoldScopeHelper`
The previous :class:`FoldScopeHelper` object.
parents : list of :class:`FoldScopeHelper`
The current list of parent objects.
Returns
-------
None
"""
if prev is None:
return # depends on [control=['if'], data=[]]
if fsh.fold_scope.trigger_level < prev.fold_scope.trigger_level:
diff = prev.fold_scope.trigger_level - fsh.fold_scope.trigger_level
del parents[-diff:] # depends on [control=['if'], data=[]]
elif fsh.fold_scope.trigger_level > prev.fold_scope.trigger_level:
parents.append(prev) # depends on [control=['if'], data=[]]
elif fsh.fold_scope.trigger_level == prev.fold_scope.trigger_level:
pass # depends on [control=['if'], data=[]] |
def quadratic(Ks, dim, rhos, required=None):
r'''
Estimates \int p^2 based on kNN distances.
In here because it's used in the l2 distance, above.
Returns array of shape (num_Ks,).
'''
# Estimated with alpha=1, beta=0:
# B_{k,d,1,0} is the same as B_{k,d,0,1} in linear()
# and the full estimator is
# B / (n - 1) * mean(rho ^ -dim)
N = rhos.shape[0]
Ks = np.asarray(Ks)
Bs = (Ks - 1) / np.pi ** (dim / 2) * gamma(dim / 2 + 1) # shape (num_Ks,)
est = Bs / (N - 1) * np.mean(rhos ** (-dim), axis=0)
return est | def function[quadratic, parameter[Ks, dim, rhos, required]]:
constant[
Estimates \int p^2 based on kNN distances.
In here because it's used in the l2 distance, above.
Returns array of shape (num_Ks,).
]
variable[N] assign[=] call[name[rhos].shape][constant[0]]
variable[Ks] assign[=] call[name[np].asarray, parameter[name[Ks]]]
variable[Bs] assign[=] binary_operation[binary_operation[binary_operation[name[Ks] - constant[1]] / binary_operation[name[np].pi ** binary_operation[name[dim] / constant[2]]]] * call[name[gamma], parameter[binary_operation[binary_operation[name[dim] / constant[2]] + constant[1]]]]]
variable[est] assign[=] binary_operation[binary_operation[name[Bs] / binary_operation[name[N] - constant[1]]] * call[name[np].mean, parameter[binary_operation[name[rhos] ** <ast.UnaryOp object at 0x7da207f02c20>]]]]
return[name[est]] | keyword[def] identifier[quadratic] ( identifier[Ks] , identifier[dim] , identifier[rhos] , identifier[required] = keyword[None] ):
literal[string]
identifier[N] = identifier[rhos] . identifier[shape] [ literal[int] ]
identifier[Ks] = identifier[np] . identifier[asarray] ( identifier[Ks] )
identifier[Bs] =( identifier[Ks] - literal[int] )/ identifier[np] . identifier[pi] **( identifier[dim] / literal[int] )* identifier[gamma] ( identifier[dim] / literal[int] + literal[int] )
identifier[est] = identifier[Bs] /( identifier[N] - literal[int] )* identifier[np] . identifier[mean] ( identifier[rhos] **(- identifier[dim] ), identifier[axis] = literal[int] )
keyword[return] identifier[est] | def quadratic(Ks, dim, rhos, required=None):
"""
Estimates \\int p^2 based on kNN distances.
In here because it's used in the l2 distance, above.
Returns array of shape (num_Ks,).
"""
# Estimated with alpha=1, beta=0:
# B_{k,d,1,0} is the same as B_{k,d,0,1} in linear()
# and the full estimator is
# B / (n - 1) * mean(rho ^ -dim)
N = rhos.shape[0]
Ks = np.asarray(Ks)
Bs = (Ks - 1) / np.pi ** (dim / 2) * gamma(dim / 2 + 1) # shape (num_Ks,)
est = Bs / (N - 1) * np.mean(rhos ** (-dim), axis=0)
return est |
def read(cls, proto):
"""
Intercepts TemporalMemory deserialization request in order to initialize
`self.infActiveState`
@param proto (DynamicStructBuilder) Proto object
@return (TemporalMemory) TemporalMemory shim instance
"""
tm = super(MonitoredTMShim, cls).read(proto)
tm.infActiveState = {"t": None}
return tm | def function[read, parameter[cls, proto]]:
constant[
Intercepts TemporalMemory deserialization request in order to initialize
`self.infActiveState`
@param proto (DynamicStructBuilder) Proto object
@return (TemporalMemory) TemporalMemory shim instance
]
variable[tm] assign[=] call[call[name[super], parameter[name[MonitoredTMShim], name[cls]]].read, parameter[name[proto]]]
name[tm].infActiveState assign[=] dictionary[[<ast.Constant object at 0x7da20c7caad0>], [<ast.Constant object at 0x7da20c7cb6d0>]]
return[name[tm]] | keyword[def] identifier[read] ( identifier[cls] , identifier[proto] ):
literal[string]
identifier[tm] = identifier[super] ( identifier[MonitoredTMShim] , identifier[cls] ). identifier[read] ( identifier[proto] )
identifier[tm] . identifier[infActiveState] ={ literal[string] : keyword[None] }
keyword[return] identifier[tm] | def read(cls, proto):
"""
Intercepts TemporalMemory deserialization request in order to initialize
`self.infActiveState`
@param proto (DynamicStructBuilder) Proto object
@return (TemporalMemory) TemporalMemory shim instance
"""
tm = super(MonitoredTMShim, cls).read(proto)
tm.infActiveState = {'t': None}
return tm |
def softplus(X):
""" Pass X through a soft-plus function, , in a numerically
stable way (using the log-sum-exp trick).
The softplus transformation is:
.. math::
\log(1 + \exp\{X\})
Parameters
----------
X: ndarray
shape (N,) array or shape (N, D) array of data.
Returns
-------
spX: ndarray
array of same shape of X with the result of softmax(X).
"""
if np.isscalar(X):
return logsumexp(np.vstack((np.zeros(1), [X])).T, axis=1)[0]
N = X.shape[0]
if X.ndim == 1:
return logsumexp(np.vstack((np.zeros(N), X)).T, axis=1)
elif X.ndim == 2:
sftX = np.empty(X.shape, dtype=float)
for d in range(X.shape[1]):
sftX[:, d] = logsumexp(np.vstack((np.zeros(N), X[:, d])).T, axis=1)
return sftX
else:
raise ValueError("This only works on up to 2D arrays.") | def function[softplus, parameter[X]]:
constant[ Pass X through a soft-plus function, , in a numerically
stable way (using the log-sum-exp trick).
The softplus transformation is:
.. math::
\log(1 + \exp\{X\})
Parameters
----------
X: ndarray
shape (N,) array or shape (N, D) array of data.
Returns
-------
spX: ndarray
array of same shape of X with the result of softmax(X).
]
if call[name[np].isscalar, parameter[name[X]]] begin[:]
return[call[call[name[logsumexp], parameter[call[name[np].vstack, parameter[tuple[[<ast.Call object at 0x7da18f00c4c0>, <ast.List object at 0x7da18f00cdf0>]]]].T]]][constant[0]]]
variable[N] assign[=] call[name[X].shape][constant[0]]
if compare[name[X].ndim equal[==] constant[1]] begin[:]
return[call[name[logsumexp], parameter[call[name[np].vstack, parameter[tuple[[<ast.Call object at 0x7da18f00ce50>, <ast.Name object at 0x7da18f00e230>]]]].T]]] | keyword[def] identifier[softplus] ( identifier[X] ):
literal[string]
keyword[if] identifier[np] . identifier[isscalar] ( identifier[X] ):
keyword[return] identifier[logsumexp] ( identifier[np] . identifier[vstack] (( identifier[np] . identifier[zeros] ( literal[int] ),[ identifier[X] ])). identifier[T] , identifier[axis] = literal[int] )[ literal[int] ]
identifier[N] = identifier[X] . identifier[shape] [ literal[int] ]
keyword[if] identifier[X] . identifier[ndim] == literal[int] :
keyword[return] identifier[logsumexp] ( identifier[np] . identifier[vstack] (( identifier[np] . identifier[zeros] ( identifier[N] ), identifier[X] )). identifier[T] , identifier[axis] = literal[int] )
keyword[elif] identifier[X] . identifier[ndim] == literal[int] :
identifier[sftX] = identifier[np] . identifier[empty] ( identifier[X] . identifier[shape] , identifier[dtype] = identifier[float] )
keyword[for] identifier[d] keyword[in] identifier[range] ( identifier[X] . identifier[shape] [ literal[int] ]):
identifier[sftX] [:, identifier[d] ]= identifier[logsumexp] ( identifier[np] . identifier[vstack] (( identifier[np] . identifier[zeros] ( identifier[N] ), identifier[X] [:, identifier[d] ])). identifier[T] , identifier[axis] = literal[int] )
keyword[return] identifier[sftX]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def softplus(X):
""" Pass X through a soft-plus function, , in a numerically
stable way (using the log-sum-exp trick).
The softplus transformation is:
.. math::
\\log(1 + \\exp\\{X\\})
Parameters
----------
X: ndarray
shape (N,) array or shape (N, D) array of data.
Returns
-------
spX: ndarray
array of same shape of X with the result of softmax(X).
"""
if np.isscalar(X):
return logsumexp(np.vstack((np.zeros(1), [X])).T, axis=1)[0] # depends on [control=['if'], data=[]]
N = X.shape[0]
if X.ndim == 1:
return logsumexp(np.vstack((np.zeros(N), X)).T, axis=1) # depends on [control=['if'], data=[]]
elif X.ndim == 2:
sftX = np.empty(X.shape, dtype=float)
for d in range(X.shape[1]):
sftX[:, d] = logsumexp(np.vstack((np.zeros(N), X[:, d])).T, axis=1) # depends on [control=['for'], data=['d']]
return sftX # depends on [control=['if'], data=[]]
else:
raise ValueError('This only works on up to 2D arrays.') |
def _topological_sort(self, targets):
"""Topologically order a list of targets"""
target_set = set(targets)
return [t for t in reversed(sort_targets(targets)) if t in target_set] | def function[_topological_sort, parameter[self, targets]]:
constant[Topologically order a list of targets]
variable[target_set] assign[=] call[name[set], parameter[name[targets]]]
return[<ast.ListComp object at 0x7da18bcc8ca0>] | keyword[def] identifier[_topological_sort] ( identifier[self] , identifier[targets] ):
literal[string]
identifier[target_set] = identifier[set] ( identifier[targets] )
keyword[return] [ identifier[t] keyword[for] identifier[t] keyword[in] identifier[reversed] ( identifier[sort_targets] ( identifier[targets] )) keyword[if] identifier[t] keyword[in] identifier[target_set] ] | def _topological_sort(self, targets):
"""Topologically order a list of targets"""
target_set = set(targets)
return [t for t in reversed(sort_targets(targets)) if t in target_set] |
def load(self, msg, stuff_to_load, *args, **kwargs):
"""Loads a particular item from disk.
The storage service always accepts these parameters:
:param trajectory_name: Name of current trajectory and name of top node in hdf5 file.
:param trajectory_index:
If no `trajectory_name` is provided, you can specify an integer index.
The trajectory at the index position in the hdf5 file is considered to loaded.
Negative indices are also possible for reverse indexing.
:param filename: Name of the hdf5 file
The following messages (first argument msg) are understood and the following arguments
can be provided in combination with the message:
* :const:`pypet.pypetconstants.TRAJECTORY` ('TRAJECTORY')
Loads a trajectory.
:param stuff_to_load: The trajectory
:param as_new: Whether to load trajectory as new
:param load_parameters: How to load parameters and config
:param load_derived_parameters: How to load derived parameters
:param load_results: How to load results
:param force: Force load in case there is a pypet version mismatch
You can specify how to load the parameters, derived parameters and results
as follows:
:const:`pypet.pypetconstants.LOAD_NOTHING`: (0)
Nothing is loaded
:const:`pypet.pypetconstants.LOAD_SKELETON`: (1)
The skeleton including annotations are loaded, i.e. the items are empty.
Non-empty items in RAM are left untouched.
:const:`pypet.pypetconstants.LOAD_DATA`: (2)
The whole data is loaded.
Only empty or in RAM non-existing instance are filled with the
data found on disk.
:const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)
The whole data is loaded.
If items that are to be loaded are already in RAM and not empty,
they are emptied and new data is loaded from disk.
* :const:`pypet.pypetconstants.LEAF` ('LEAF')
Loads a parameter or result.
:param stuff_to_load: The item to be loaded
:param load_data: How to load data
:param load_only:
If you load a result, you can partially load it and ignore the
rest of the data. Just specify the name of the data you want to load.
You can also provide a list,
for example `load_only='spikes'`, `load_only=['spikes','membrane_potential']`.
Issues a warning if items cannot be found.
:param load_except:
If you load a result you can partially load in and specify items
that should NOT be loaded here. You cannot use `load_except` and
`load_only` at the same time.
* :const:`pypet.pyetconstants.GROUP`
Loads a group a node (comment and annotations)
:param recursive:
Recursively loads everything below
:param load_data:
How to load stuff if ``recursive=True``
accepted values as above for loading the trajectory
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
* :const:`pypet.pypetconstants.TREE` ('TREE')
Loads a whole subtree
:param stuff_to_load: The parent node (!) not the one where loading starts!
:param child_name: Name of child node that should be loaded
:param recursive: Whether to load recursively the subtree below child
:param load_data:
How to load stuff, accepted values as above for loading the trajectory
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
:param trajectory: The trajectory object
* :const:`pypet.pypetconstants.LIST` ('LIST')
Analogous to :ref:`storing lists <store-lists>`
:raises:
NoSuchServiceError if message or data is not understood
DataNotInStorageError if data to be loaded cannot be found on disk
"""
opened = True
try:
opened = self._srvc_opening_routine('r', kwargs=kwargs)
if msg == pypetconstants.TRAJECTORY:
self._trj_load_trajectory(stuff_to_load, *args, **kwargs)
elif msg == pypetconstants.LEAF:
self._prm_load_parameter_or_result(stuff_to_load, *args, **kwargs)
elif msg == pypetconstants.GROUP:
self._grp_load_group(stuff_to_load, *args, **kwargs)
elif msg == pypetconstants.TREE:
self._tree_load_sub_branch(stuff_to_load, *args, **kwargs)
elif msg == pypetconstants.LIST:
self._srvc_load_several_items(stuff_to_load, *args, **kwargs)
else:
raise pex.NoSuchServiceError('I do not know how to handle `%s`' % msg)
except pt.NoSuchNodeError as exc:
self._logger.error('Failed loading `%s`' % str(stuff_to_load))
raise pex.DataNotInStorageError(repr(exc))
except:
self._logger.error('Failed loading `%s`' % str(stuff_to_load))
raise
finally:
self._srvc_closing_routine(opened) | def function[load, parameter[self, msg, stuff_to_load]]:
constant[Loads a particular item from disk.
The storage service always accepts these parameters:
:param trajectory_name: Name of current trajectory and name of top node in hdf5 file.
:param trajectory_index:
If no `trajectory_name` is provided, you can specify an integer index.
The trajectory at the index position in the hdf5 file is considered to loaded.
Negative indices are also possible for reverse indexing.
:param filename: Name of the hdf5 file
The following messages (first argument msg) are understood and the following arguments
can be provided in combination with the message:
* :const:`pypet.pypetconstants.TRAJECTORY` ('TRAJECTORY')
Loads a trajectory.
:param stuff_to_load: The trajectory
:param as_new: Whether to load trajectory as new
:param load_parameters: How to load parameters and config
:param load_derived_parameters: How to load derived parameters
:param load_results: How to load results
:param force: Force load in case there is a pypet version mismatch
You can specify how to load the parameters, derived parameters and results
as follows:
:const:`pypet.pypetconstants.LOAD_NOTHING`: (0)
Nothing is loaded
:const:`pypet.pypetconstants.LOAD_SKELETON`: (1)
The skeleton including annotations are loaded, i.e. the items are empty.
Non-empty items in RAM are left untouched.
:const:`pypet.pypetconstants.LOAD_DATA`: (2)
The whole data is loaded.
Only empty or in RAM non-existing instance are filled with the
data found on disk.
:const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)
The whole data is loaded.
If items that are to be loaded are already in RAM and not empty,
they are emptied and new data is loaded from disk.
* :const:`pypet.pypetconstants.LEAF` ('LEAF')
Loads a parameter or result.
:param stuff_to_load: The item to be loaded
:param load_data: How to load data
:param load_only:
If you load a result, you can partially load it and ignore the
rest of the data. Just specify the name of the data you want to load.
You can also provide a list,
for example `load_only='spikes'`, `load_only=['spikes','membrane_potential']`.
Issues a warning if items cannot be found.
:param load_except:
If you load a result you can partially load in and specify items
that should NOT be loaded here. You cannot use `load_except` and
`load_only` at the same time.
* :const:`pypet.pyetconstants.GROUP`
Loads a group a node (comment and annotations)
:param recursive:
Recursively loads everything below
:param load_data:
How to load stuff if ``recursive=True``
accepted values as above for loading the trajectory
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
* :const:`pypet.pypetconstants.TREE` ('TREE')
Loads a whole subtree
:param stuff_to_load: The parent node (!) not the one where loading starts!
:param child_name: Name of child node that should be loaded
:param recursive: Whether to load recursively the subtree below child
:param load_data:
How to load stuff, accepted values as above for loading the trajectory
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
:param trajectory: The trajectory object
* :const:`pypet.pypetconstants.LIST` ('LIST')
Analogous to :ref:`storing lists <store-lists>`
:raises:
NoSuchServiceError if message or data is not understood
DataNotInStorageError if data to be loaded cannot be found on disk
]
variable[opened] assign[=] constant[True]
<ast.Try object at 0x7da1b0535330> | keyword[def] identifier[load] ( identifier[self] , identifier[msg] , identifier[stuff_to_load] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[opened] = keyword[True]
keyword[try] :
identifier[opened] = identifier[self] . identifier[_srvc_opening_routine] ( literal[string] , identifier[kwargs] = identifier[kwargs] )
keyword[if] identifier[msg] == identifier[pypetconstants] . identifier[TRAJECTORY] :
identifier[self] . identifier[_trj_load_trajectory] ( identifier[stuff_to_load] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[msg] == identifier[pypetconstants] . identifier[LEAF] :
identifier[self] . identifier[_prm_load_parameter_or_result] ( identifier[stuff_to_load] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[msg] == identifier[pypetconstants] . identifier[GROUP] :
identifier[self] . identifier[_grp_load_group] ( identifier[stuff_to_load] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[msg] == identifier[pypetconstants] . identifier[TREE] :
identifier[self] . identifier[_tree_load_sub_branch] ( identifier[stuff_to_load] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[msg] == identifier[pypetconstants] . identifier[LIST] :
identifier[self] . identifier[_srvc_load_several_items] ( identifier[stuff_to_load] ,* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[pex] . identifier[NoSuchServiceError] ( literal[string] % identifier[msg] )
keyword[except] identifier[pt] . identifier[NoSuchNodeError] keyword[as] identifier[exc] :
identifier[self] . identifier[_logger] . identifier[error] ( literal[string] % identifier[str] ( identifier[stuff_to_load] ))
keyword[raise] identifier[pex] . identifier[DataNotInStorageError] ( identifier[repr] ( identifier[exc] ))
keyword[except] :
identifier[self] . identifier[_logger] . identifier[error] ( literal[string] % identifier[str] ( identifier[stuff_to_load] ))
keyword[raise]
keyword[finally] :
identifier[self] . identifier[_srvc_closing_routine] ( identifier[opened] ) | def load(self, msg, stuff_to_load, *args, **kwargs):
"""Loads a particular item from disk.
The storage service always accepts these parameters:
:param trajectory_name: Name of current trajectory and name of top node in hdf5 file.
:param trajectory_index:
If no `trajectory_name` is provided, you can specify an integer index.
The trajectory at the index position in the hdf5 file is considered to loaded.
Negative indices are also possible for reverse indexing.
:param filename: Name of the hdf5 file
The following messages (first argument msg) are understood and the following arguments
can be provided in combination with the message:
* :const:`pypet.pypetconstants.TRAJECTORY` ('TRAJECTORY')
Loads a trajectory.
:param stuff_to_load: The trajectory
:param as_new: Whether to load trajectory as new
:param load_parameters: How to load parameters and config
:param load_derived_parameters: How to load derived parameters
:param load_results: How to load results
:param force: Force load in case there is a pypet version mismatch
You can specify how to load the parameters, derived parameters and results
as follows:
:const:`pypet.pypetconstants.LOAD_NOTHING`: (0)
Nothing is loaded
:const:`pypet.pypetconstants.LOAD_SKELETON`: (1)
The skeleton including annotations are loaded, i.e. the items are empty.
Non-empty items in RAM are left untouched.
:const:`pypet.pypetconstants.LOAD_DATA`: (2)
The whole data is loaded.
Only empty or in RAM non-existing instance are filled with the
data found on disk.
:const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)
The whole data is loaded.
If items that are to be loaded are already in RAM and not empty,
they are emptied and new data is loaded from disk.
* :const:`pypet.pypetconstants.LEAF` ('LEAF')
Loads a parameter or result.
:param stuff_to_load: The item to be loaded
:param load_data: How to load data
:param load_only:
If you load a result, you can partially load it and ignore the
rest of the data. Just specify the name of the data you want to load.
You can also provide a list,
for example `load_only='spikes'`, `load_only=['spikes','membrane_potential']`.
Issues a warning if items cannot be found.
:param load_except:
If you load a result you can partially load in and specify items
that should NOT be loaded here. You cannot use `load_except` and
`load_only` at the same time.
* :const:`pypet.pyetconstants.GROUP`
Loads a group a node (comment and annotations)
:param recursive:
Recursively loads everything below
:param load_data:
How to load stuff if ``recursive=True``
accepted values as above for loading the trajectory
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
* :const:`pypet.pypetconstants.TREE` ('TREE')
Loads a whole subtree
:param stuff_to_load: The parent node (!) not the one where loading starts!
:param child_name: Name of child node that should be loaded
:param recursive: Whether to load recursively the subtree below child
:param load_data:
How to load stuff, accepted values as above for loading the trajectory
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
:param trajectory: The trajectory object
* :const:`pypet.pypetconstants.LIST` ('LIST')
Analogous to :ref:`storing lists <store-lists>`
:raises:
NoSuchServiceError if message or data is not understood
DataNotInStorageError if data to be loaded cannot be found on disk
"""
opened = True
try:
opened = self._srvc_opening_routine('r', kwargs=kwargs)
if msg == pypetconstants.TRAJECTORY:
self._trj_load_trajectory(stuff_to_load, *args, **kwargs) # depends on [control=['if'], data=[]]
elif msg == pypetconstants.LEAF:
self._prm_load_parameter_or_result(stuff_to_load, *args, **kwargs) # depends on [control=['if'], data=[]]
elif msg == pypetconstants.GROUP:
self._grp_load_group(stuff_to_load, *args, **kwargs) # depends on [control=['if'], data=[]]
elif msg == pypetconstants.TREE:
self._tree_load_sub_branch(stuff_to_load, *args, **kwargs) # depends on [control=['if'], data=[]]
elif msg == pypetconstants.LIST:
self._srvc_load_several_items(stuff_to_load, *args, **kwargs) # depends on [control=['if'], data=[]]
else:
raise pex.NoSuchServiceError('I do not know how to handle `%s`' % msg) # depends on [control=['try'], data=[]]
except pt.NoSuchNodeError as exc:
self._logger.error('Failed loading `%s`' % str(stuff_to_load))
raise pex.DataNotInStorageError(repr(exc)) # depends on [control=['except'], data=['exc']]
except:
self._logger.error('Failed loading `%s`' % str(stuff_to_load))
raise # depends on [control=['except'], data=[]]
finally:
self._srvc_closing_routine(opened) |
def tile_to_zoom_level(tile, dst_pyramid=None, matching_method="gdal", precision=8):
"""
Determine the best zoom level in target TilePyramid from given Tile.
Parameters
----------
tile : BufferedTile
dst_pyramid : BufferedTilePyramid
matching_method : str ('gdal' or 'min')
gdal: Uses GDAL's standard method. Here, the target resolution is calculated by
averaging the extent's pixel sizes over both x and y axes. This approach
returns a zoom level which may not have the best quality but will speed up
reading significantly.
min: Returns the zoom level which matches the minimum resolution of the extent's
four corner pixels. This approach returns the zoom level with the best
possible quality but with low performance. If the tile extent is outside of
the destination pyramid, a TopologicalError will be raised.
precision : int
Round resolutions to n digits before comparing.
Returns
-------
zoom : int
"""
def width_height(bounds):
try:
l, b, r, t = reproject_geometry(
box(*bounds), src_crs=tile.crs, dst_crs=dst_pyramid.crs
).bounds
except ValueError:
raise TopologicalError("bounds cannot be translated into target CRS")
return r - l, t - b
if tile.tp.crs == dst_pyramid.crs:
return tile.zoom
else:
if matching_method == "gdal":
# use rasterio/GDAL method to calculate default warp target properties
transform, width, height = calculate_default_transform(
tile.tp.crs,
dst_pyramid.crs,
tile.width,
tile.height,
*tile.bounds
)
# this is the resolution the tile would have in destination TilePyramid CRS
tile_resolution = round(transform[0], precision)
elif matching_method == "min":
# calculate the minimum pixel size from the four tile corner pixels
l, b, r, t = tile.bounds
x = tile.pixel_x_size
y = tile.pixel_y_size
res = []
for bounds in [
(l, t - y, l + x, t), # left top
(l, b, l + x, b + y), # left bottom
(r - x, b, r, b + y), # right bottom
(r - x, t - y, r, t) # right top
]:
try:
w, h = width_height(bounds)
res.extend([w, h])
except TopologicalError:
logger.debug("pixel outside of destination pyramid")
if res:
tile_resolution = round(min(res), precision)
else:
raise TopologicalError("tile outside of destination pyramid")
else:
raise ValueError("invalid method given: %s", matching_method)
logger.debug(
"we are looking for a zoom level interpolating to %s resolution",
tile_resolution
)
zoom = 0
while True:
td_resolution = round(dst_pyramid.pixel_x_size(zoom), precision)
if td_resolution <= tile_resolution:
break
zoom += 1
logger.debug("target zoom for %s: %s (%s)", tile_resolution, zoom, td_resolution)
return zoom | def function[tile_to_zoom_level, parameter[tile, dst_pyramid, matching_method, precision]]:
constant[
Determine the best zoom level in target TilePyramid from given Tile.
Parameters
----------
tile : BufferedTile
dst_pyramid : BufferedTilePyramid
matching_method : str ('gdal' or 'min')
gdal: Uses GDAL's standard method. Here, the target resolution is calculated by
averaging the extent's pixel sizes over both x and y axes. This approach
returns a zoom level which may not have the best quality but will speed up
reading significantly.
min: Returns the zoom level which matches the minimum resolution of the extent's
four corner pixels. This approach returns the zoom level with the best
possible quality but with low performance. If the tile extent is outside of
the destination pyramid, a TopologicalError will be raised.
precision : int
Round resolutions to n digits before comparing.
Returns
-------
zoom : int
]
def function[width_height, parameter[bounds]]:
<ast.Try object at 0x7da1b00b0370>
return[tuple[[<ast.BinOp object at 0x7da1b00b00d0>, <ast.BinOp object at 0x7da1b00b0cd0>]]]
if compare[name[tile].tp.crs equal[==] name[dst_pyramid].crs] begin[:]
return[name[tile].zoom] | keyword[def] identifier[tile_to_zoom_level] ( identifier[tile] , identifier[dst_pyramid] = keyword[None] , identifier[matching_method] = literal[string] , identifier[precision] = literal[int] ):
literal[string]
keyword[def] identifier[width_height] ( identifier[bounds] ):
keyword[try] :
identifier[l] , identifier[b] , identifier[r] , identifier[t] = identifier[reproject_geometry] (
identifier[box] (* identifier[bounds] ), identifier[src_crs] = identifier[tile] . identifier[crs] , identifier[dst_crs] = identifier[dst_pyramid] . identifier[crs]
). identifier[bounds]
keyword[except] identifier[ValueError] :
keyword[raise] identifier[TopologicalError] ( literal[string] )
keyword[return] identifier[r] - identifier[l] , identifier[t] - identifier[b]
keyword[if] identifier[tile] . identifier[tp] . identifier[crs] == identifier[dst_pyramid] . identifier[crs] :
keyword[return] identifier[tile] . identifier[zoom]
keyword[else] :
keyword[if] identifier[matching_method] == literal[string] :
identifier[transform] , identifier[width] , identifier[height] = identifier[calculate_default_transform] (
identifier[tile] . identifier[tp] . identifier[crs] ,
identifier[dst_pyramid] . identifier[crs] ,
identifier[tile] . identifier[width] ,
identifier[tile] . identifier[height] ,
* identifier[tile] . identifier[bounds]
)
identifier[tile_resolution] = identifier[round] ( identifier[transform] [ literal[int] ], identifier[precision] )
keyword[elif] identifier[matching_method] == literal[string] :
identifier[l] , identifier[b] , identifier[r] , identifier[t] = identifier[tile] . identifier[bounds]
identifier[x] = identifier[tile] . identifier[pixel_x_size]
identifier[y] = identifier[tile] . identifier[pixel_y_size]
identifier[res] =[]
keyword[for] identifier[bounds] keyword[in] [
( identifier[l] , identifier[t] - identifier[y] , identifier[l] + identifier[x] , identifier[t] ),
( identifier[l] , identifier[b] , identifier[l] + identifier[x] , identifier[b] + identifier[y] ),
( identifier[r] - identifier[x] , identifier[b] , identifier[r] , identifier[b] + identifier[y] ),
( identifier[r] - identifier[x] , identifier[t] - identifier[y] , identifier[r] , identifier[t] )
]:
keyword[try] :
identifier[w] , identifier[h] = identifier[width_height] ( identifier[bounds] )
identifier[res] . identifier[extend] ([ identifier[w] , identifier[h] ])
keyword[except] identifier[TopologicalError] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[res] :
identifier[tile_resolution] = identifier[round] ( identifier[min] ( identifier[res] ), identifier[precision] )
keyword[else] :
keyword[raise] identifier[TopologicalError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] , identifier[matching_method] )
identifier[logger] . identifier[debug] (
literal[string] ,
identifier[tile_resolution]
)
identifier[zoom] = literal[int]
keyword[while] keyword[True] :
identifier[td_resolution] = identifier[round] ( identifier[dst_pyramid] . identifier[pixel_x_size] ( identifier[zoom] ), identifier[precision] )
keyword[if] identifier[td_resolution] <= identifier[tile_resolution] :
keyword[break]
identifier[zoom] += literal[int]
identifier[logger] . identifier[debug] ( literal[string] , identifier[tile_resolution] , identifier[zoom] , identifier[td_resolution] )
keyword[return] identifier[zoom] | def tile_to_zoom_level(tile, dst_pyramid=None, matching_method='gdal', precision=8):
"""
Determine the best zoom level in target TilePyramid from given Tile.
Parameters
----------
tile : BufferedTile
dst_pyramid : BufferedTilePyramid
matching_method : str ('gdal' or 'min')
gdal: Uses GDAL's standard method. Here, the target resolution is calculated by
averaging the extent's pixel sizes over both x and y axes. This approach
returns a zoom level which may not have the best quality but will speed up
reading significantly.
min: Returns the zoom level which matches the minimum resolution of the extent's
four corner pixels. This approach returns the zoom level with the best
possible quality but with low performance. If the tile extent is outside of
the destination pyramid, a TopologicalError will be raised.
precision : int
Round resolutions to n digits before comparing.
Returns
-------
zoom : int
"""
def width_height(bounds):
try:
(l, b, r, t) = reproject_geometry(box(*bounds), src_crs=tile.crs, dst_crs=dst_pyramid.crs).bounds # depends on [control=['try'], data=[]]
except ValueError:
raise TopologicalError('bounds cannot be translated into target CRS') # depends on [control=['except'], data=[]]
return (r - l, t - b)
if tile.tp.crs == dst_pyramid.crs:
return tile.zoom # depends on [control=['if'], data=[]]
else:
if matching_method == 'gdal':
# use rasterio/GDAL method to calculate default warp target properties
(transform, width, height) = calculate_default_transform(tile.tp.crs, dst_pyramid.crs, tile.width, tile.height, *tile.bounds)
# this is the resolution the tile would have in destination TilePyramid CRS
tile_resolution = round(transform[0], precision) # depends on [control=['if'], data=[]]
elif matching_method == 'min':
# calculate the minimum pixel size from the four tile corner pixels
(l, b, r, t) = tile.bounds
x = tile.pixel_x_size
y = tile.pixel_y_size
res = []
for bounds in [(l, t - y, l + x, t), (l, b, l + x, b + y), (r - x, b, r, b + y), (r - x, t - y, r, t)]: # left top
# left bottom
# right bottom
# right top
try:
(w, h) = width_height(bounds)
res.extend([w, h]) # depends on [control=['try'], data=[]]
except TopologicalError:
logger.debug('pixel outside of destination pyramid') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['bounds']]
if res:
tile_resolution = round(min(res), precision) # depends on [control=['if'], data=[]]
else:
raise TopologicalError('tile outside of destination pyramid') # depends on [control=['if'], data=[]]
else:
raise ValueError('invalid method given: %s', matching_method)
logger.debug('we are looking for a zoom level interpolating to %s resolution', tile_resolution)
zoom = 0
while True:
td_resolution = round(dst_pyramid.pixel_x_size(zoom), precision)
if td_resolution <= tile_resolution:
break # depends on [control=['if'], data=[]]
zoom += 1 # depends on [control=['while'], data=[]]
logger.debug('target zoom for %s: %s (%s)', tile_resolution, zoom, td_resolution)
return zoom |
def unpack(self):
"""Decompose a GVariant into a native Python object."""
LEAF_ACCESSORS = {
'b': self.get_boolean,
'y': self.get_byte,
'n': self.get_int16,
'q': self.get_uint16,
'i': self.get_int32,
'u': self.get_uint32,
'x': self.get_int64,
't': self.get_uint64,
'h': self.get_handle,
'd': self.get_double,
's': self.get_string,
'o': self.get_string, # object path
'g': self.get_string, # signature
}
# simple values
la = LEAF_ACCESSORS.get(self.get_type_string())
if la:
return la()
# tuple
if self.get_type_string().startswith('('):
res = [self.get_child_value(i).unpack()
for i in range(self.n_children())]
return tuple(res)
# dictionary
if self.get_type_string().startswith('a{'):
res = {}
for i in range(self.n_children()):
v = self.get_child_value(i)
res[v.get_child_value(0).unpack()] = v.get_child_value(1).unpack()
return res
# array
if self.get_type_string().startswith('a'):
return [self.get_child_value(i).unpack()
for i in range(self.n_children())]
# variant (just unbox transparently)
if self.get_type_string().startswith('v'):
return self.get_variant().unpack()
# maybe
if self.get_type_string().startswith('m'):
m = self.get_maybe()
return m.unpack() if m else None
raise NotImplementedError('unsupported GVariant type ' + self.get_type_string()) | def function[unpack, parameter[self]]:
constant[Decompose a GVariant into a native Python object.]
variable[LEAF_ACCESSORS] assign[=] dictionary[[<ast.Constant object at 0x7da1b10c6530>, <ast.Constant object at 0x7da1b10c6650>, <ast.Constant object at 0x7da1b10c6e90>, <ast.Constant object at 0x7da1b10c6830>, <ast.Constant object at 0x7da1b10c7b20>, <ast.Constant object at 0x7da1b10c6ce0>, <ast.Constant object at 0x7da1b10c7c70>, <ast.Constant object at 0x7da1b10c7850>, <ast.Constant object at 0x7da1b10c7610>, <ast.Constant object at 0x7da1b10c7b80>, <ast.Constant object at 0x7da1b10c78b0>, <ast.Constant object at 0x7da1b10c7970>, <ast.Constant object at 0x7da1b10c74c0>], [<ast.Attribute object at 0x7da1b10c6b60>, <ast.Attribute object at 0x7da1b10c6380>, <ast.Attribute object at 0x7da1b10c7d30>, <ast.Attribute object at 0x7da1b10c56c0>, <ast.Attribute object at 0x7da1b10c7910>, <ast.Attribute object at 0x7da1b10c6320>, <ast.Attribute object at 0x7da1b10c7e20>, <ast.Attribute object at 0x7da1b10c4250>, <ast.Attribute object at 0x7da1b10c6500>, <ast.Attribute object at 0x7da1b10c4430>, <ast.Attribute object at 0x7da1b10c6260>, <ast.Attribute object at 0x7da1b10c7280>, <ast.Attribute object at 0x7da1b10c4af0>]]
variable[la] assign[=] call[name[LEAF_ACCESSORS].get, parameter[call[name[self].get_type_string, parameter[]]]]
if name[la] begin[:]
return[call[name[la], parameter[]]]
if call[call[name[self].get_type_string, parameter[]].startswith, parameter[constant[(]]] begin[:]
variable[res] assign[=] <ast.ListComp object at 0x7da1b10c6770>
return[call[name[tuple], parameter[name[res]]]]
if call[call[name[self].get_type_string, parameter[]].startswith, parameter[constant[a{]]] begin[:]
variable[res] assign[=] dictionary[[], []]
for taget[name[i]] in starred[call[name[range], parameter[call[name[self].n_children, parameter[]]]]] begin[:]
variable[v] assign[=] call[name[self].get_child_value, parameter[name[i]]]
call[name[res]][call[call[name[v].get_child_value, parameter[constant[0]]].unpack, parameter[]]] assign[=] call[call[name[v].get_child_value, parameter[constant[1]]].unpack, parameter[]]
return[name[res]]
if call[call[name[self].get_type_string, parameter[]].startswith, parameter[constant[a]]] begin[:]
return[<ast.ListComp object at 0x7da1b0fd5ab0>]
if call[call[name[self].get_type_string, parameter[]].startswith, parameter[constant[v]]] begin[:]
return[call[call[name[self].get_variant, parameter[]].unpack, parameter[]]]
if call[call[name[self].get_type_string, parameter[]].startswith, parameter[constant[m]]] begin[:]
variable[m] assign[=] call[name[self].get_maybe, parameter[]]
return[<ast.IfExp object at 0x7da1b0fd49a0>]
<ast.Raise object at 0x7da1b0fd61a0> | keyword[def] identifier[unpack] ( identifier[self] ):
literal[string]
identifier[LEAF_ACCESSORS] ={
literal[string] : identifier[self] . identifier[get_boolean] ,
literal[string] : identifier[self] . identifier[get_byte] ,
literal[string] : identifier[self] . identifier[get_int16] ,
literal[string] : identifier[self] . identifier[get_uint16] ,
literal[string] : identifier[self] . identifier[get_int32] ,
literal[string] : identifier[self] . identifier[get_uint32] ,
literal[string] : identifier[self] . identifier[get_int64] ,
literal[string] : identifier[self] . identifier[get_uint64] ,
literal[string] : identifier[self] . identifier[get_handle] ,
literal[string] : identifier[self] . identifier[get_double] ,
literal[string] : identifier[self] . identifier[get_string] ,
literal[string] : identifier[self] . identifier[get_string] ,
literal[string] : identifier[self] . identifier[get_string] ,
}
identifier[la] = identifier[LEAF_ACCESSORS] . identifier[get] ( identifier[self] . identifier[get_type_string] ())
keyword[if] identifier[la] :
keyword[return] identifier[la] ()
keyword[if] identifier[self] . identifier[get_type_string] (). identifier[startswith] ( literal[string] ):
identifier[res] =[ identifier[self] . identifier[get_child_value] ( identifier[i] ). identifier[unpack] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[n_children] ())]
keyword[return] identifier[tuple] ( identifier[res] )
keyword[if] identifier[self] . identifier[get_type_string] (). identifier[startswith] ( literal[string] ):
identifier[res] ={}
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[n_children] ()):
identifier[v] = identifier[self] . identifier[get_child_value] ( identifier[i] )
identifier[res] [ identifier[v] . identifier[get_child_value] ( literal[int] ). identifier[unpack] ()]= identifier[v] . identifier[get_child_value] ( literal[int] ). identifier[unpack] ()
keyword[return] identifier[res]
keyword[if] identifier[self] . identifier[get_type_string] (). identifier[startswith] ( literal[string] ):
keyword[return] [ identifier[self] . identifier[get_child_value] ( identifier[i] ). identifier[unpack] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[n_children] ())]
keyword[if] identifier[self] . identifier[get_type_string] (). identifier[startswith] ( literal[string] ):
keyword[return] identifier[self] . identifier[get_variant] (). identifier[unpack] ()
keyword[if] identifier[self] . identifier[get_type_string] (). identifier[startswith] ( literal[string] ):
identifier[m] = identifier[self] . identifier[get_maybe] ()
keyword[return] identifier[m] . identifier[unpack] () keyword[if] identifier[m] keyword[else] keyword[None]
keyword[raise] identifier[NotImplementedError] ( literal[string] + identifier[self] . identifier[get_type_string] ()) | def unpack(self):
"""Decompose a GVariant into a native Python object.""" # object path
# signature
LEAF_ACCESSORS = {'b': self.get_boolean, 'y': self.get_byte, 'n': self.get_int16, 'q': self.get_uint16, 'i': self.get_int32, 'u': self.get_uint32, 'x': self.get_int64, 't': self.get_uint64, 'h': self.get_handle, 'd': self.get_double, 's': self.get_string, 'o': self.get_string, 'g': self.get_string}
# simple values
la = LEAF_ACCESSORS.get(self.get_type_string())
if la:
return la() # depends on [control=['if'], data=[]]
# tuple
if self.get_type_string().startswith('('):
res = [self.get_child_value(i).unpack() for i in range(self.n_children())]
return tuple(res) # depends on [control=['if'], data=[]]
# dictionary
if self.get_type_string().startswith('a{'):
res = {}
for i in range(self.n_children()):
v = self.get_child_value(i)
res[v.get_child_value(0).unpack()] = v.get_child_value(1).unpack() # depends on [control=['for'], data=['i']]
return res # depends on [control=['if'], data=[]]
# array
if self.get_type_string().startswith('a'):
return [self.get_child_value(i).unpack() for i in range(self.n_children())] # depends on [control=['if'], data=[]]
# variant (just unbox transparently)
if self.get_type_string().startswith('v'):
return self.get_variant().unpack() # depends on [control=['if'], data=[]]
# maybe
if self.get_type_string().startswith('m'):
m = self.get_maybe()
return m.unpack() if m else None # depends on [control=['if'], data=[]]
raise NotImplementedError('unsupported GVariant type ' + self.get_type_string()) |
def _getEndTime(self, t):
"""Add the aggregation period to the input time t and return a datetime object
Years and months are handled as aspecial case due to leap years
and months with different number of dates. They can't be converted
to a strict timedelta because a period of 3 months will have different
durations actually. The solution is to just add the years and months
fields directly to the current time.
Other periods are converted to timedelta and just added to current time.
"""
assert isinstance(t, datetime.datetime)
if self._aggTimeDelta:
return t + self._aggTimeDelta
else:
year = t.year + self._aggYears + (t.month - 1 + self._aggMonths) / 12
month = (t.month - 1 + self._aggMonths) % 12 + 1
return t.replace(year=year, month=month) | def function[_getEndTime, parameter[self, t]]:
constant[Add the aggregation period to the input time t and return a datetime object
Years and months are handled as aspecial case due to leap years
and months with different number of dates. They can't be converted
to a strict timedelta because a period of 3 months will have different
durations actually. The solution is to just add the years and months
fields directly to the current time.
Other periods are converted to timedelta and just added to current time.
]
assert[call[name[isinstance], parameter[name[t], name[datetime].datetime]]]
if name[self]._aggTimeDelta begin[:]
return[binary_operation[name[t] + name[self]._aggTimeDelta]] | keyword[def] identifier[_getEndTime] ( identifier[self] , identifier[t] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[t] , identifier[datetime] . identifier[datetime] )
keyword[if] identifier[self] . identifier[_aggTimeDelta] :
keyword[return] identifier[t] + identifier[self] . identifier[_aggTimeDelta]
keyword[else] :
identifier[year] = identifier[t] . identifier[year] + identifier[self] . identifier[_aggYears] +( identifier[t] . identifier[month] - literal[int] + identifier[self] . identifier[_aggMonths] )/ literal[int]
identifier[month] =( identifier[t] . identifier[month] - literal[int] + identifier[self] . identifier[_aggMonths] )% literal[int] + literal[int]
keyword[return] identifier[t] . identifier[replace] ( identifier[year] = identifier[year] , identifier[month] = identifier[month] ) | def _getEndTime(self, t):
"""Add the aggregation period to the input time t and return a datetime object
Years and months are handled as aspecial case due to leap years
and months with different number of dates. They can't be converted
to a strict timedelta because a period of 3 months will have different
durations actually. The solution is to just add the years and months
fields directly to the current time.
Other periods are converted to timedelta and just added to current time.
"""
assert isinstance(t, datetime.datetime)
if self._aggTimeDelta:
return t + self._aggTimeDelta # depends on [control=['if'], data=[]]
else:
year = t.year + self._aggYears + (t.month - 1 + self._aggMonths) / 12
month = (t.month - 1 + self._aggMonths) % 12 + 1
return t.replace(year=year, month=month) |
def get_field(self, field_name):
"""Get field associated with given attribute."""
for attr_name, field in self:
if field_name == attr_name:
return field
raise errors.FieldNotFound('Field not found', field_name) | def function[get_field, parameter[self, field_name]]:
constant[Get field associated with given attribute.]
for taget[tuple[[<ast.Name object at 0x7da1b1081cc0>, <ast.Name object at 0x7da1b10833d0>]]] in starred[name[self]] begin[:]
if compare[name[field_name] equal[==] name[attr_name]] begin[:]
return[name[field]]
<ast.Raise object at 0x7da1b10835b0> | keyword[def] identifier[get_field] ( identifier[self] , identifier[field_name] ):
literal[string]
keyword[for] identifier[attr_name] , identifier[field] keyword[in] identifier[self] :
keyword[if] identifier[field_name] == identifier[attr_name] :
keyword[return] identifier[field]
keyword[raise] identifier[errors] . identifier[FieldNotFound] ( literal[string] , identifier[field_name] ) | def get_field(self, field_name):
"""Get field associated with given attribute."""
for (attr_name, field) in self:
if field_name == attr_name:
return field # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
raise errors.FieldNotFound('Field not found', field_name) |
def get_extents(self, element, ranges, range_type='combined'):
"""
Make adjustments to plot extents by computing
stacked bar heights, adjusting the bar baseline
and forcing the x-axis to be categorical.
"""
if self.batched:
overlay = self.current_frame
element = Bars(overlay.table(), kdims=element.kdims+overlay.kdims,
vdims=element.vdims)
for kd in overlay.kdims:
ranges[kd.name]['combined'] = overlay.range(kd)
extents = super(BarPlot, self).get_extents(element, ranges, range_type)
xdim = element.kdims[0]
ydim = element.vdims[0]
# Compute stack heights
if self.stacked or self.stack_index:
ds = Dataset(element)
pos_range = ds.select(**{ydim.name: (0, None)}).aggregate(xdim, function=np.sum).range(ydim)
neg_range = ds.select(**{ydim.name: (None, 0)}).aggregate(xdim, function=np.sum).range(ydim)
y0, y1 = max_range([pos_range, neg_range])
else:
y0, y1 = ranges[ydim.name]['combined']
padding = 0 if self.overlaid else self.padding
_, ypad, _ = get_axis_padding(padding)
y0, y1 = range_pad(y0, y1, ypad, self.logy)
# Set y-baseline
if y0 < 0:
y1 = max([y1, 0])
elif self.logy:
y0 = (ydim.range[0] or (10**(np.log10(y1)-2)) if y1 else 0.01)
else:
y0 = 0
# Ensure x-axis is picked up as categorical
x0 = xdim.pprint_value(extents[0])
x1 = xdim.pprint_value(extents[2])
return (x0, y0, x1, y1) | def function[get_extents, parameter[self, element, ranges, range_type]]:
constant[
Make adjustments to plot extents by computing
stacked bar heights, adjusting the bar baseline
and forcing the x-axis to be categorical.
]
if name[self].batched begin[:]
variable[overlay] assign[=] name[self].current_frame
variable[element] assign[=] call[name[Bars], parameter[call[name[overlay].table, parameter[]]]]
for taget[name[kd]] in starred[name[overlay].kdims] begin[:]
call[call[name[ranges]][name[kd].name]][constant[combined]] assign[=] call[name[overlay].range, parameter[name[kd]]]
variable[extents] assign[=] call[call[name[super], parameter[name[BarPlot], name[self]]].get_extents, parameter[name[element], name[ranges], name[range_type]]]
variable[xdim] assign[=] call[name[element].kdims][constant[0]]
variable[ydim] assign[=] call[name[element].vdims][constant[0]]
if <ast.BoolOp object at 0x7da1b1ace110> begin[:]
variable[ds] assign[=] call[name[Dataset], parameter[name[element]]]
variable[pos_range] assign[=] call[call[call[name[ds].select, parameter[]].aggregate, parameter[name[xdim]]].range, parameter[name[ydim]]]
variable[neg_range] assign[=] call[call[call[name[ds].select, parameter[]].aggregate, parameter[name[xdim]]].range, parameter[name[ydim]]]
<ast.Tuple object at 0x7da18dc06f80> assign[=] call[name[max_range], parameter[list[[<ast.Name object at 0x7da18dc064d0>, <ast.Name object at 0x7da18dc06d10>]]]]
variable[padding] assign[=] <ast.IfExp object at 0x7da18dc07610>
<ast.Tuple object at 0x7da18dc07eb0> assign[=] call[name[get_axis_padding], parameter[name[padding]]]
<ast.Tuple object at 0x7da18dc053f0> assign[=] call[name[range_pad], parameter[name[y0], name[y1], name[ypad], name[self].logy]]
if compare[name[y0] less[<] constant[0]] begin[:]
variable[y1] assign[=] call[name[max], parameter[list[[<ast.Name object at 0x7da18dc04e20>, <ast.Constant object at 0x7da18dc04a00>]]]]
variable[x0] assign[=] call[name[xdim].pprint_value, parameter[call[name[extents]][constant[0]]]]
variable[x1] assign[=] call[name[xdim].pprint_value, parameter[call[name[extents]][constant[2]]]]
return[tuple[[<ast.Name object at 0x7da18dc04970>, <ast.Name object at 0x7da18dc06470>, <ast.Name object at 0x7da18dc07460>, <ast.Name object at 0x7da18dc07cd0>]]] | keyword[def] identifier[get_extents] ( identifier[self] , identifier[element] , identifier[ranges] , identifier[range_type] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[batched] :
identifier[overlay] = identifier[self] . identifier[current_frame]
identifier[element] = identifier[Bars] ( identifier[overlay] . identifier[table] (), identifier[kdims] = identifier[element] . identifier[kdims] + identifier[overlay] . identifier[kdims] ,
identifier[vdims] = identifier[element] . identifier[vdims] )
keyword[for] identifier[kd] keyword[in] identifier[overlay] . identifier[kdims] :
identifier[ranges] [ identifier[kd] . identifier[name] ][ literal[string] ]= identifier[overlay] . identifier[range] ( identifier[kd] )
identifier[extents] = identifier[super] ( identifier[BarPlot] , identifier[self] ). identifier[get_extents] ( identifier[element] , identifier[ranges] , identifier[range_type] )
identifier[xdim] = identifier[element] . identifier[kdims] [ literal[int] ]
identifier[ydim] = identifier[element] . identifier[vdims] [ literal[int] ]
keyword[if] identifier[self] . identifier[stacked] keyword[or] identifier[self] . identifier[stack_index] :
identifier[ds] = identifier[Dataset] ( identifier[element] )
identifier[pos_range] = identifier[ds] . identifier[select] (**{ identifier[ydim] . identifier[name] :( literal[int] , keyword[None] )}). identifier[aggregate] ( identifier[xdim] , identifier[function] = identifier[np] . identifier[sum] ). identifier[range] ( identifier[ydim] )
identifier[neg_range] = identifier[ds] . identifier[select] (**{ identifier[ydim] . identifier[name] :( keyword[None] , literal[int] )}). identifier[aggregate] ( identifier[xdim] , identifier[function] = identifier[np] . identifier[sum] ). identifier[range] ( identifier[ydim] )
identifier[y0] , identifier[y1] = identifier[max_range] ([ identifier[pos_range] , identifier[neg_range] ])
keyword[else] :
identifier[y0] , identifier[y1] = identifier[ranges] [ identifier[ydim] . identifier[name] ][ literal[string] ]
identifier[padding] = literal[int] keyword[if] identifier[self] . identifier[overlaid] keyword[else] identifier[self] . identifier[padding]
identifier[_] , identifier[ypad] , identifier[_] = identifier[get_axis_padding] ( identifier[padding] )
identifier[y0] , identifier[y1] = identifier[range_pad] ( identifier[y0] , identifier[y1] , identifier[ypad] , identifier[self] . identifier[logy] )
keyword[if] identifier[y0] < literal[int] :
identifier[y1] = identifier[max] ([ identifier[y1] , literal[int] ])
keyword[elif] identifier[self] . identifier[logy] :
identifier[y0] =( identifier[ydim] . identifier[range] [ literal[int] ] keyword[or] ( literal[int] **( identifier[np] . identifier[log10] ( identifier[y1] )- literal[int] )) keyword[if] identifier[y1] keyword[else] literal[int] )
keyword[else] :
identifier[y0] = literal[int]
identifier[x0] = identifier[xdim] . identifier[pprint_value] ( identifier[extents] [ literal[int] ])
identifier[x1] = identifier[xdim] . identifier[pprint_value] ( identifier[extents] [ literal[int] ])
keyword[return] ( identifier[x0] , identifier[y0] , identifier[x1] , identifier[y1] ) | def get_extents(self, element, ranges, range_type='combined'):
"""
Make adjustments to plot extents by computing
stacked bar heights, adjusting the bar baseline
and forcing the x-axis to be categorical.
"""
if self.batched:
overlay = self.current_frame
element = Bars(overlay.table(), kdims=element.kdims + overlay.kdims, vdims=element.vdims)
for kd in overlay.kdims:
ranges[kd.name]['combined'] = overlay.range(kd) # depends on [control=['for'], data=['kd']] # depends on [control=['if'], data=[]]
extents = super(BarPlot, self).get_extents(element, ranges, range_type)
xdim = element.kdims[0]
ydim = element.vdims[0]
# Compute stack heights
if self.stacked or self.stack_index:
ds = Dataset(element)
pos_range = ds.select(**{ydim.name: (0, None)}).aggregate(xdim, function=np.sum).range(ydim)
neg_range = ds.select(**{ydim.name: (None, 0)}).aggregate(xdim, function=np.sum).range(ydim)
(y0, y1) = max_range([pos_range, neg_range]) # depends on [control=['if'], data=[]]
else:
(y0, y1) = ranges[ydim.name]['combined']
padding = 0 if self.overlaid else self.padding
(_, ypad, _) = get_axis_padding(padding)
(y0, y1) = range_pad(y0, y1, ypad, self.logy)
# Set y-baseline
if y0 < 0:
y1 = max([y1, 0]) # depends on [control=['if'], data=[]]
elif self.logy:
y0 = ydim.range[0] or 10 ** (np.log10(y1) - 2) if y1 else 0.01 # depends on [control=['if'], data=[]]
else:
y0 = 0
# Ensure x-axis is picked up as categorical
x0 = xdim.pprint_value(extents[0])
x1 = xdim.pprint_value(extents[2])
return (x0, y0, x1, y1) |
def insertPrimaryDataset(self, businput):
"""
Input dictionary has to have the following keys:
primary_ds_name, primary_ds_type, creation_date, create_by.
it builds the correct dictionary for dao input and executes the dao
"""
conn = self.dbi.connection()
tran = conn.begin()
#checking for required fields
if "primary_ds_name" not in businput:
dbsExceptionHandler("dbsException-invalid-input",
" DBSPrimaryDataset/insertPrimaryDataset. " +
"Primary dataset Name is required for insertPrimaryDataset.")
try:
businput["primary_ds_type_id"] = (self.primdstypeList.execute(conn, businput["primary_ds_type"]
))[0]["primary_ds_type_id"]
del businput["primary_ds_type"]
businput["primary_ds_id"] = self.sm.increment(conn, "SEQ_PDS")
self.primdsin.execute(conn, businput, tran)
tran.commit()
tran = None
except KeyError as ke:
dbsExceptionHandler("dbsException-invalid-input",
" DBSPrimaryDataset/insertPrimaryDataset. Missing: %s" % ke)
self.logger.warning(" DBSPrimaryDataset/insertPrimaryDataset. Missing: %s" % ke)
except IndexError as ie:
dbsExceptionHandler("dbsException-missing-data",
" DBSPrimaryDataset/insertPrimaryDataset. %s" % ie)
self.logger.warning(" DBSPrimaryDataset/insertPrimaryDataset. Missing: %s" % ie)
except Exception as ex:
if (str(ex).lower().find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1):
self.logger.warning("DBSPrimaryDataset/insertPrimaryDataset:" +
" Unique constraint violation being ignored...")
self.logger.warning(ex)
else:
if tran:
tran.rollback()
if conn: conn.close()
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close() | def function[insertPrimaryDataset, parameter[self, businput]]:
constant[
Input dictionary has to have the following keys:
primary_ds_name, primary_ds_type, creation_date, create_by.
it builds the correct dictionary for dao input and executes the dao
]
variable[conn] assign[=] call[name[self].dbi.connection, parameter[]]
variable[tran] assign[=] call[name[conn].begin, parameter[]]
if compare[constant[primary_ds_name] <ast.NotIn object at 0x7da2590d7190> name[businput]] begin[:]
call[name[dbsExceptionHandler], parameter[constant[dbsException-invalid-input], binary_operation[constant[ DBSPrimaryDataset/insertPrimaryDataset. ] + constant[Primary dataset Name is required for insertPrimaryDataset.]]]]
<ast.Try object at 0x7da1b10c0430> | keyword[def] identifier[insertPrimaryDataset] ( identifier[self] , identifier[businput] ):
literal[string]
identifier[conn] = identifier[self] . identifier[dbi] . identifier[connection] ()
identifier[tran] = identifier[conn] . identifier[begin] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[businput] :
identifier[dbsExceptionHandler] ( literal[string] ,
literal[string] +
literal[string] )
keyword[try] :
identifier[businput] [ literal[string] ]=( identifier[self] . identifier[primdstypeList] . identifier[execute] ( identifier[conn] , identifier[businput] [ literal[string] ]
))[ literal[int] ][ literal[string] ]
keyword[del] identifier[businput] [ literal[string] ]
identifier[businput] [ literal[string] ]= identifier[self] . identifier[sm] . identifier[increment] ( identifier[conn] , literal[string] )
identifier[self] . identifier[primdsin] . identifier[execute] ( identifier[conn] , identifier[businput] , identifier[tran] )
identifier[tran] . identifier[commit] ()
identifier[tran] = keyword[None]
keyword[except] identifier[KeyError] keyword[as] identifier[ke] :
identifier[dbsExceptionHandler] ( literal[string] ,
literal[string] % identifier[ke] )
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] % identifier[ke] )
keyword[except] identifier[IndexError] keyword[as] identifier[ie] :
identifier[dbsExceptionHandler] ( literal[string] ,
literal[string] % identifier[ie] )
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] % identifier[ie] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] ( identifier[str] ( identifier[ex] ). identifier[lower] (). identifier[find] ( literal[string] )!=- literal[int] keyword[or]
identifier[str] ( identifier[ex] ). identifier[lower] (). identifier[find] ( literal[string] )!=- literal[int] ):
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] +
literal[string] )
identifier[self] . identifier[logger] . identifier[warning] ( identifier[ex] )
keyword[else] :
keyword[if] identifier[tran] :
identifier[tran] . identifier[rollback] ()
keyword[if] identifier[conn] : identifier[conn] . identifier[close] ()
keyword[raise]
keyword[finally] :
keyword[if] identifier[tran] :
identifier[tran] . identifier[rollback] ()
keyword[if] identifier[conn] :
identifier[conn] . identifier[close] () | def insertPrimaryDataset(self, businput):
"""
Input dictionary has to have the following keys:
primary_ds_name, primary_ds_type, creation_date, create_by.
it builds the correct dictionary for dao input and executes the dao
"""
conn = self.dbi.connection()
tran = conn.begin()
#checking for required fields
if 'primary_ds_name' not in businput:
dbsExceptionHandler('dbsException-invalid-input', ' DBSPrimaryDataset/insertPrimaryDataset. ' + 'Primary dataset Name is required for insertPrimaryDataset.') # depends on [control=['if'], data=[]]
try:
businput['primary_ds_type_id'] = self.primdstypeList.execute(conn, businput['primary_ds_type'])[0]['primary_ds_type_id']
del businput['primary_ds_type']
businput['primary_ds_id'] = self.sm.increment(conn, 'SEQ_PDS')
self.primdsin.execute(conn, businput, tran)
tran.commit()
tran = None # depends on [control=['try'], data=[]]
except KeyError as ke:
dbsExceptionHandler('dbsException-invalid-input', ' DBSPrimaryDataset/insertPrimaryDataset. Missing: %s' % ke)
self.logger.warning(' DBSPrimaryDataset/insertPrimaryDataset. Missing: %s' % ke) # depends on [control=['except'], data=['ke']]
except IndexError as ie:
dbsExceptionHandler('dbsException-missing-data', ' DBSPrimaryDataset/insertPrimaryDataset. %s' % ie)
self.logger.warning(' DBSPrimaryDataset/insertPrimaryDataset. Missing: %s' % ie) # depends on [control=['except'], data=['ie']]
except Exception as ex:
if str(ex).lower().find('unique constraint') != -1 or str(ex).lower().find('duplicate') != -1:
self.logger.warning('DBSPrimaryDataset/insertPrimaryDataset:' + ' Unique constraint violation being ignored...')
self.logger.warning(ex) # depends on [control=['if'], data=[]]
else:
if tran:
tran.rollback() # depends on [control=['if'], data=[]]
if conn:
conn.close() # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['ex']]
finally:
if tran:
tran.rollback() # depends on [control=['if'], data=[]]
if conn:
conn.close() # depends on [control=['if'], data=[]] |
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
# total number of words in reference sentences
m = len(_split_into_words(reference_sentences))
# total number of words in evaluated sentences
n = len(_split_into_words(evaluated_sentences))
union_lcs_sum_across_all_references = 0
for ref_s in reference_sentences:
union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,
ref_s)
return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n) | def function[rouge_l_summary_level, parameter[evaluated_sentences, reference_sentences]]:
constant[
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
]
if <ast.BoolOp object at 0x7da1b1d22b90> begin[:]
<ast.Raise object at 0x7da20c6aab60>
variable[m] assign[=] call[name[len], parameter[call[name[_split_into_words], parameter[name[reference_sentences]]]]]
variable[n] assign[=] call[name[len], parameter[call[name[_split_into_words], parameter[name[evaluated_sentences]]]]]
variable[union_lcs_sum_across_all_references] assign[=] constant[0]
for taget[name[ref_s]] in starred[name[reference_sentences]] begin[:]
<ast.AugAssign object at 0x7da1b1d216c0>
return[call[name[_f_p_r_lcs], parameter[name[union_lcs_sum_across_all_references], name[m], name[n]]]] | keyword[def] identifier[rouge_l_summary_level] ( identifier[evaluated_sentences] , identifier[reference_sentences] ):
literal[string]
keyword[if] identifier[len] ( identifier[evaluated_sentences] )<= literal[int] keyword[or] identifier[len] ( identifier[reference_sentences] )<= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[m] = identifier[len] ( identifier[_split_into_words] ( identifier[reference_sentences] ))
identifier[n] = identifier[len] ( identifier[_split_into_words] ( identifier[evaluated_sentences] ))
identifier[union_lcs_sum_across_all_references] = literal[int]
keyword[for] identifier[ref_s] keyword[in] identifier[reference_sentences] :
identifier[union_lcs_sum_across_all_references] += identifier[_union_lcs] ( identifier[evaluated_sentences] ,
identifier[ref_s] )
keyword[return] identifier[_f_p_r_lcs] ( identifier[union_lcs_sum_across_all_references] , identifier[m] , identifier[n] ) | def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError('Collections must contain at least 1 sentence.') # depends on [control=['if'], data=[]]
# total number of words in reference sentences
m = len(_split_into_words(reference_sentences))
# total number of words in evaluated sentences
n = len(_split_into_words(evaluated_sentences))
union_lcs_sum_across_all_references = 0
for ref_s in reference_sentences:
union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences, ref_s) # depends on [control=['for'], data=['ref_s']]
return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n) |
def global_closeness_centrality(g, node=None, normalize=True):
"""
Calculates global closeness centrality for one or all nodes in the network.
See :func:`.node_global_closeness_centrality` for more information.
Parameters
----------
g : networkx.Graph
normalize : boolean
If True, normalizes centrality based on the average shortest path
length. Default is True.
Returns
-------
C : dict
Dictionary of results, with node identifiers as keys and gcc as values.
"""
if not node:
C = {}
for node in g.nodes():
C[node] = global_closeness_centrality(g, node, normalize=normalize)
return C
values = nx.shortest_path_length(g, node).values()
c = sum([1./pl for pl in values if pl != 0.]) / len(g)
if normalize:
ac = 0
for sg in nx.connected_component_subgraphs(g):
if len(sg.nodes()) > 1:
aspl = nx.average_shortest_path_length(sg)
ac += (1./aspl) * (float(len(sg)) / float(len(g))**2 )
c = c/ac
return c | def function[global_closeness_centrality, parameter[g, node, normalize]]:
constant[
Calculates global closeness centrality for one or all nodes in the network.
See :func:`.node_global_closeness_centrality` for more information.
Parameters
----------
g : networkx.Graph
normalize : boolean
If True, normalizes centrality based on the average shortest path
length. Default is True.
Returns
-------
C : dict
Dictionary of results, with node identifiers as keys and gcc as values.
]
if <ast.UnaryOp object at 0x7da1b1253af0> begin[:]
variable[C] assign[=] dictionary[[], []]
for taget[name[node]] in starred[call[name[g].nodes, parameter[]]] begin[:]
call[name[C]][name[node]] assign[=] call[name[global_closeness_centrality], parameter[name[g], name[node]]]
return[name[C]]
variable[values] assign[=] call[call[name[nx].shortest_path_length, parameter[name[g], name[node]]].values, parameter[]]
variable[c] assign[=] binary_operation[call[name[sum], parameter[<ast.ListComp object at 0x7da1b12c17e0>]] / call[name[len], parameter[name[g]]]]
if name[normalize] begin[:]
variable[ac] assign[=] constant[0]
for taget[name[sg]] in starred[call[name[nx].connected_component_subgraphs, parameter[name[g]]]] begin[:]
if compare[call[name[len], parameter[call[name[sg].nodes, parameter[]]]] greater[>] constant[1]] begin[:]
variable[aspl] assign[=] call[name[nx].average_shortest_path_length, parameter[name[sg]]]
<ast.AugAssign object at 0x7da1b12c3c40>
variable[c] assign[=] binary_operation[name[c] / name[ac]]
return[name[c]] | keyword[def] identifier[global_closeness_centrality] ( identifier[g] , identifier[node] = keyword[None] , identifier[normalize] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[node] :
identifier[C] ={}
keyword[for] identifier[node] keyword[in] identifier[g] . identifier[nodes] ():
identifier[C] [ identifier[node] ]= identifier[global_closeness_centrality] ( identifier[g] , identifier[node] , identifier[normalize] = identifier[normalize] )
keyword[return] identifier[C]
identifier[values] = identifier[nx] . identifier[shortest_path_length] ( identifier[g] , identifier[node] ). identifier[values] ()
identifier[c] = identifier[sum] ([ literal[int] / identifier[pl] keyword[for] identifier[pl] keyword[in] identifier[values] keyword[if] identifier[pl] != literal[int] ])/ identifier[len] ( identifier[g] )
keyword[if] identifier[normalize] :
identifier[ac] = literal[int]
keyword[for] identifier[sg] keyword[in] identifier[nx] . identifier[connected_component_subgraphs] ( identifier[g] ):
keyword[if] identifier[len] ( identifier[sg] . identifier[nodes] ())> literal[int] :
identifier[aspl] = identifier[nx] . identifier[average_shortest_path_length] ( identifier[sg] )
identifier[ac] +=( literal[int] / identifier[aspl] )*( identifier[float] ( identifier[len] ( identifier[sg] ))/ identifier[float] ( identifier[len] ( identifier[g] ))** literal[int] )
identifier[c] = identifier[c] / identifier[ac]
keyword[return] identifier[c] | def global_closeness_centrality(g, node=None, normalize=True):
"""
Calculates global closeness centrality for one or all nodes in the network.
See :func:`.node_global_closeness_centrality` for more information.
Parameters
----------
g : networkx.Graph
normalize : boolean
If True, normalizes centrality based on the average shortest path
length. Default is True.
Returns
-------
C : dict
Dictionary of results, with node identifiers as keys and gcc as values.
"""
if not node:
C = {}
for node in g.nodes():
C[node] = global_closeness_centrality(g, node, normalize=normalize) # depends on [control=['for'], data=['node']]
return C # depends on [control=['if'], data=[]]
values = nx.shortest_path_length(g, node).values()
c = sum([1.0 / pl for pl in values if pl != 0.0]) / len(g)
if normalize:
ac = 0
for sg in nx.connected_component_subgraphs(g):
if len(sg.nodes()) > 1:
aspl = nx.average_shortest_path_length(sg)
ac += 1.0 / aspl * (float(len(sg)) / float(len(g)) ** 2) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sg']]
c = c / ac # depends on [control=['if'], data=[]]
return c |
def get_percentage_lattice_parameter_changes(self):
"""
Returns the percentage lattice parameter changes.
Returns:
A dict of the percentage change in lattice parameter, e.g.,
{'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%,
2.1% and -3.1% in the a, b and c lattice parameters respectively.
"""
initial_latt = self.initial.lattice
final_latt = self.final.lattice
d = {l: getattr(final_latt, l) / getattr(initial_latt, l) - 1
for l in ["a", "b", "c"]}
return d | def function[get_percentage_lattice_parameter_changes, parameter[self]]:
constant[
Returns the percentage lattice parameter changes.
Returns:
A dict of the percentage change in lattice parameter, e.g.,
{'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%,
2.1% and -3.1% in the a, b and c lattice parameters respectively.
]
variable[initial_latt] assign[=] name[self].initial.lattice
variable[final_latt] assign[=] name[self].final.lattice
variable[d] assign[=] <ast.DictComp object at 0x7da20c7cbfa0>
return[name[d]] | keyword[def] identifier[get_percentage_lattice_parameter_changes] ( identifier[self] ):
literal[string]
identifier[initial_latt] = identifier[self] . identifier[initial] . identifier[lattice]
identifier[final_latt] = identifier[self] . identifier[final] . identifier[lattice]
identifier[d] ={ identifier[l] : identifier[getattr] ( identifier[final_latt] , identifier[l] )/ identifier[getattr] ( identifier[initial_latt] , identifier[l] )- literal[int]
keyword[for] identifier[l] keyword[in] [ literal[string] , literal[string] , literal[string] ]}
keyword[return] identifier[d] | def get_percentage_lattice_parameter_changes(self):
"""
Returns the percentage lattice parameter changes.
Returns:
A dict of the percentage change in lattice parameter, e.g.,
{'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%,
2.1% and -3.1% in the a, b and c lattice parameters respectively.
"""
initial_latt = self.initial.lattice
final_latt = self.final.lattice
d = {l: getattr(final_latt, l) / getattr(initial_latt, l) - 1 for l in ['a', 'b', 'c']}
return d |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.