code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def make_get_exception_details_message(self, seq, thread_id, topmost_frame):
"""Returns exception details as XML """
try:
# If the debugger is not suspended, just return the thread and its id.
cmd_text = ['<xml><thread id="%s" ' % (thread_id,)]
if topmost_frame is not None:
try:
frame = topmost_frame
topmost_frame = None
while frame is not None:
if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'):
arg = frame.f_locals.get('arg', None)
if arg is not None:
exc_type, exc_desc, _thread_suspend_str, thread_stack_str = self._make_send_curr_exception_trace_str(
thread_id, *arg)
cmd_text.append('exc_type="%s" ' % (exc_type,))
cmd_text.append('exc_desc="%s" ' % (exc_desc,))
cmd_text.append('>')
cmd_text.append(thread_stack_str)
break
frame = frame.f_back
else:
cmd_text.append('>')
finally:
frame = None
cmd_text.append('</thread></xml>')
return NetCommand(CMD_GET_EXCEPTION_DETAILS, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str()) | def function[make_get_exception_details_message, parameter[self, seq, thread_id, topmost_frame]]:
constant[Returns exception details as XML ]
<ast.Try object at 0x7da1b08da890> | keyword[def] identifier[make_get_exception_details_message] ( identifier[self] , identifier[seq] , identifier[thread_id] , identifier[topmost_frame] ):
literal[string]
keyword[try] :
identifier[cmd_text] =[ literal[string] %( identifier[thread_id] ,)]
keyword[if] identifier[topmost_frame] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[frame] = identifier[topmost_frame]
identifier[topmost_frame] = keyword[None]
keyword[while] identifier[frame] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[frame] . identifier[f_code] . identifier[co_name] == literal[string] keyword[and] identifier[frame] . identifier[f_code] . identifier[co_filename] . identifier[endswith] ( literal[string] ):
identifier[arg] = identifier[frame] . identifier[f_locals] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[arg] keyword[is] keyword[not] keyword[None] :
identifier[exc_type] , identifier[exc_desc] , identifier[_thread_suspend_str] , identifier[thread_stack_str] = identifier[self] . identifier[_make_send_curr_exception_trace_str] (
identifier[thread_id] ,* identifier[arg] )
identifier[cmd_text] . identifier[append] ( literal[string] %( identifier[exc_type] ,))
identifier[cmd_text] . identifier[append] ( literal[string] %( identifier[exc_desc] ,))
identifier[cmd_text] . identifier[append] ( literal[string] )
identifier[cmd_text] . identifier[append] ( identifier[thread_stack_str] )
keyword[break]
identifier[frame] = identifier[frame] . identifier[f_back]
keyword[else] :
identifier[cmd_text] . identifier[append] ( literal[string] )
keyword[finally] :
identifier[frame] = keyword[None]
identifier[cmd_text] . identifier[append] ( literal[string] )
keyword[return] identifier[NetCommand] ( identifier[CMD_GET_EXCEPTION_DETAILS] , identifier[seq] , literal[string] . identifier[join] ( identifier[cmd_text] ))
keyword[except] :
keyword[return] identifier[self] . identifier[make_error_message] ( identifier[seq] , identifier[get_exception_traceback_str] ()) | def make_get_exception_details_message(self, seq, thread_id, topmost_frame):
"""Returns exception details as XML """
try:
# If the debugger is not suspended, just return the thread and its id.
cmd_text = ['<xml><thread id="%s" ' % (thread_id,)]
if topmost_frame is not None:
try:
frame = topmost_frame
topmost_frame = None
while frame is not None:
if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'):
arg = frame.f_locals.get('arg', None)
if arg is not None:
(exc_type, exc_desc, _thread_suspend_str, thread_stack_str) = self._make_send_curr_exception_trace_str(thread_id, *arg)
cmd_text.append('exc_type="%s" ' % (exc_type,))
cmd_text.append('exc_desc="%s" ' % (exc_desc,))
cmd_text.append('>')
cmd_text.append(thread_stack_str)
break # depends on [control=['if'], data=['arg']] # depends on [control=['if'], data=[]]
frame = frame.f_back # depends on [control=['while'], data=['frame']]
else:
cmd_text.append('>') # depends on [control=['try'], data=[]]
finally:
frame = None # depends on [control=['if'], data=['topmost_frame']]
cmd_text.append('</thread></xml>')
return NetCommand(CMD_GET_EXCEPTION_DETAILS, seq, ''.join(cmd_text)) # depends on [control=['try'], data=[]]
except:
return self.make_error_message(seq, get_exception_traceback_str()) # depends on [control=['except'], data=[]] |
def update_params(self, parameters):
"""Pass in a dictionary to update url parameters for NBA stats API
Parameters
----------
parameters : dict
A dict containing key, value pairs that correspond with NBA stats
API parameters.
Returns
-------
self : TeamLog
The TeamLog object containing the updated NBA stats API
parameters.
"""
self.url_paramaters.update(parameters)
self.response = requests.get(self.base_url, params=self.url_paramaters,
headers=HEADERS)
# raise error if status code is not 200
self.response.raise_for_status()
return self | def function[update_params, parameter[self, parameters]]:
constant[Pass in a dictionary to update url parameters for NBA stats API
Parameters
----------
parameters : dict
A dict containing key, value pairs that correspond with NBA stats
API parameters.
Returns
-------
self : TeamLog
The TeamLog object containing the updated NBA stats API
parameters.
]
call[name[self].url_paramaters.update, parameter[name[parameters]]]
name[self].response assign[=] call[name[requests].get, parameter[name[self].base_url]]
call[name[self].response.raise_for_status, parameter[]]
return[name[self]] | keyword[def] identifier[update_params] ( identifier[self] , identifier[parameters] ):
literal[string]
identifier[self] . identifier[url_paramaters] . identifier[update] ( identifier[parameters] )
identifier[self] . identifier[response] = identifier[requests] . identifier[get] ( identifier[self] . identifier[base_url] , identifier[params] = identifier[self] . identifier[url_paramaters] ,
identifier[headers] = identifier[HEADERS] )
identifier[self] . identifier[response] . identifier[raise_for_status] ()
keyword[return] identifier[self] | def update_params(self, parameters):
"""Pass in a dictionary to update url parameters for NBA stats API
Parameters
----------
parameters : dict
A dict containing key, value pairs that correspond with NBA stats
API parameters.
Returns
-------
self : TeamLog
The TeamLog object containing the updated NBA stats API
parameters.
"""
self.url_paramaters.update(parameters)
self.response = requests.get(self.base_url, params=self.url_paramaters, headers=HEADERS)
# raise error if status code is not 200
self.response.raise_for_status()
return self |
def _requirements_to_dict(rs):
"""Convert supported requirements into dictionary for output.
"""
out = []
added = set([])
for r in rs:
if r["class"] == "DockerRequirement" and "docker" not in added:
added.add("docker")
out.append({"requirement_type": "docker", "value": r["dockerImageId"]})
elif r["class"] == "ResourceRequirement":
if "coresMin" in r and "cpu" not in added:
added.add("cpu")
out.append({"requirement_type": "cpu", "value": r["coresMin"]})
if "ramMin" in r and "memory" not in added:
added.add("memory")
out.append({"requirement_type": "memory", "value": "%s MB" % r["ramMin"]})
if "tmpdirMin" in r and "disks" not in added:
added.add("disks")
out.append({"requirement_type": "disks", "value": "local-disk %s HDD" % r["tmpdirMin"]})
return out | def function[_requirements_to_dict, parameter[rs]]:
constant[Convert supported requirements into dictionary for output.
]
variable[out] assign[=] list[[]]
variable[added] assign[=] call[name[set], parameter[list[[]]]]
for taget[name[r]] in starred[name[rs]] begin[:]
if <ast.BoolOp object at 0x7da1b19da230> begin[:]
call[name[added].add, parameter[constant[docker]]]
call[name[out].append, parameter[dictionary[[<ast.Constant object at 0x7da1b19d9090>, <ast.Constant object at 0x7da1b19d9660>], [<ast.Constant object at 0x7da1b19d9ff0>, <ast.Subscript object at 0x7da1b19dada0>]]]]
return[name[out]] | keyword[def] identifier[_requirements_to_dict] ( identifier[rs] ):
literal[string]
identifier[out] =[]
identifier[added] = identifier[set] ([])
keyword[for] identifier[r] keyword[in] identifier[rs] :
keyword[if] identifier[r] [ literal[string] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[added] :
identifier[added] . identifier[add] ( literal[string] )
identifier[out] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[r] [ literal[string] ]})
keyword[elif] identifier[r] [ literal[string] ]== literal[string] :
keyword[if] literal[string] keyword[in] identifier[r] keyword[and] literal[string] keyword[not] keyword[in] identifier[added] :
identifier[added] . identifier[add] ( literal[string] )
identifier[out] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[r] [ literal[string] ]})
keyword[if] literal[string] keyword[in] identifier[r] keyword[and] literal[string] keyword[not] keyword[in] identifier[added] :
identifier[added] . identifier[add] ( literal[string] )
identifier[out] . identifier[append] ({ literal[string] : literal[string] , literal[string] : literal[string] % identifier[r] [ literal[string] ]})
keyword[if] literal[string] keyword[in] identifier[r] keyword[and] literal[string] keyword[not] keyword[in] identifier[added] :
identifier[added] . identifier[add] ( literal[string] )
identifier[out] . identifier[append] ({ literal[string] : literal[string] , literal[string] : literal[string] % identifier[r] [ literal[string] ]})
keyword[return] identifier[out] | def _requirements_to_dict(rs):
"""Convert supported requirements into dictionary for output.
"""
out = []
added = set([])
for r in rs:
if r['class'] == 'DockerRequirement' and 'docker' not in added:
added.add('docker')
out.append({'requirement_type': 'docker', 'value': r['dockerImageId']}) # depends on [control=['if'], data=[]]
elif r['class'] == 'ResourceRequirement':
if 'coresMin' in r and 'cpu' not in added:
added.add('cpu')
out.append({'requirement_type': 'cpu', 'value': r['coresMin']}) # depends on [control=['if'], data=[]]
if 'ramMin' in r and 'memory' not in added:
added.add('memory')
out.append({'requirement_type': 'memory', 'value': '%s MB' % r['ramMin']}) # depends on [control=['if'], data=[]]
if 'tmpdirMin' in r and 'disks' not in added:
added.add('disks')
out.append({'requirement_type': 'disks', 'value': 'local-disk %s HDD' % r['tmpdirMin']}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']]
return out |
def _download_mlu_data(tmp_dir, data_dir):
"""Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data.
"""
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
filename = os.path.basename(_URL)
file_path = os.path.join(tmp_dir, filename)
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/63.0.3239.132 Safari/537.36"}
resp = requests.get(_URL, headers=headers)
with open(file_path, "wb") as f:
f.write(resp.content)
with tarfile.open(file_path, "r:gz") as tar:
tar.extractall(tmp_dir)
return tmp_dir | def function[_download_mlu_data, parameter[tmp_dir, data_dir]]:
constant[Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data.
]
if <ast.UnaryOp object at 0x7da1b1e15030> begin[:]
call[name[tf].gfile.MakeDirs, parameter[name[data_dir]]]
variable[filename] assign[=] call[name[os].path.basename, parameter[name[_URL]]]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[tmp_dir], name[filename]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b2089d50>], [<ast.Constant object at 0x7da1b2088370>]]
variable[resp] assign[=] call[name[requests].get, parameter[name[_URL]]]
with call[name[open], parameter[name[file_path], constant[wb]]] begin[:]
call[name[f].write, parameter[name[resp].content]]
with call[name[tarfile].open, parameter[name[file_path], constant[r:gz]]] begin[:]
call[name[tar].extractall, parameter[name[tmp_dir]]]
return[name[tmp_dir]] | keyword[def] identifier[_download_mlu_data] ( identifier[tmp_dir] , identifier[data_dir] ):
literal[string]
keyword[if] keyword[not] identifier[tf] . identifier[gfile] . identifier[Exists] ( identifier[data_dir] ):
identifier[tf] . identifier[gfile] . identifier[MakeDirs] ( identifier[data_dir] )
identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[_URL] )
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_dir] , identifier[filename] )
identifier[headers] ={ literal[string] : literal[string]
literal[string]
literal[string] }
identifier[resp] = identifier[requests] . identifier[get] ( identifier[_URL] , identifier[headers] = identifier[headers] )
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[resp] . identifier[content] )
keyword[with] identifier[tarfile] . identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[tar] :
identifier[tar] . identifier[extractall] ( identifier[tmp_dir] )
keyword[return] identifier[tmp_dir] | def _download_mlu_data(tmp_dir, data_dir):
"""Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data.
"""
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir) # depends on [control=['if'], data=[]]
filename = os.path.basename(_URL)
file_path = os.path.join(tmp_dir, filename)
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
resp = requests.get(_URL, headers=headers)
with open(file_path, 'wb') as f:
f.write(resp.content) # depends on [control=['with'], data=['f']]
with tarfile.open(file_path, 'r:gz') as tar:
tar.extractall(tmp_dir) # depends on [control=['with'], data=['tar']]
return tmp_dir |
def sign_hmac(secret, payload):
"""Returns a base64-encoded HMAC-SHA1 signature of a given string.
:param secret: The key used for the signature, base64 encoded.
:type secret: string
:param payload: The payload to sign.
:type payload: string
:rtype: string
"""
payload = payload.encode('ascii', 'strict')
secret = secret.encode('ascii', 'strict')
sig = hmac.new(base64.urlsafe_b64decode(secret), payload, hashlib.sha1)
out = base64.urlsafe_b64encode(sig.digest())
return out.decode('utf-8') | def function[sign_hmac, parameter[secret, payload]]:
constant[Returns a base64-encoded HMAC-SHA1 signature of a given string.
:param secret: The key used for the signature, base64 encoded.
:type secret: string
:param payload: The payload to sign.
:type payload: string
:rtype: string
]
variable[payload] assign[=] call[name[payload].encode, parameter[constant[ascii], constant[strict]]]
variable[secret] assign[=] call[name[secret].encode, parameter[constant[ascii], constant[strict]]]
variable[sig] assign[=] call[name[hmac].new, parameter[call[name[base64].urlsafe_b64decode, parameter[name[secret]]], name[payload], name[hashlib].sha1]]
variable[out] assign[=] call[name[base64].urlsafe_b64encode, parameter[call[name[sig].digest, parameter[]]]]
return[call[name[out].decode, parameter[constant[utf-8]]]] | keyword[def] identifier[sign_hmac] ( identifier[secret] , identifier[payload] ):
literal[string]
identifier[payload] = identifier[payload] . identifier[encode] ( literal[string] , literal[string] )
identifier[secret] = identifier[secret] . identifier[encode] ( literal[string] , literal[string] )
identifier[sig] = identifier[hmac] . identifier[new] ( identifier[base64] . identifier[urlsafe_b64decode] ( identifier[secret] ), identifier[payload] , identifier[hashlib] . identifier[sha1] )
identifier[out] = identifier[base64] . identifier[urlsafe_b64encode] ( identifier[sig] . identifier[digest] ())
keyword[return] identifier[out] . identifier[decode] ( literal[string] ) | def sign_hmac(secret, payload):
"""Returns a base64-encoded HMAC-SHA1 signature of a given string.
:param secret: The key used for the signature, base64 encoded.
:type secret: string
:param payload: The payload to sign.
:type payload: string
:rtype: string
"""
payload = payload.encode('ascii', 'strict')
secret = secret.encode('ascii', 'strict')
sig = hmac.new(base64.urlsafe_b64decode(secret), payload, hashlib.sha1)
out = base64.urlsafe_b64encode(sig.digest())
return out.decode('utf-8') |
def send_signal(signal, request, user, **kwargs):
'''Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
'''
params = {
'user_ip': request.remote_addr
}
params.update(kwargs)
if user.is_authenticated:
params['uid'] = user.id
signal.send(request.url, **params) | def function[send_signal, parameter[signal, request, user]]:
constant[Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b11d8820>], [<ast.Attribute object at 0x7da1b11d93f0>]]
call[name[params].update, parameter[name[kwargs]]]
if name[user].is_authenticated begin[:]
call[name[params]][constant[uid]] assign[=] name[user].id
call[name[signal].send, parameter[name[request].url]] | keyword[def] identifier[send_signal] ( identifier[signal] , identifier[request] , identifier[user] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] : identifier[request] . identifier[remote_addr]
}
identifier[params] . identifier[update] ( identifier[kwargs] )
keyword[if] identifier[user] . identifier[is_authenticated] :
identifier[params] [ literal[string] ]= identifier[user] . identifier[id]
identifier[signal] . identifier[send] ( identifier[request] . identifier[url] ,** identifier[params] ) | def send_signal(signal, request, user, **kwargs):
"""Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
"""
params = {'user_ip': request.remote_addr}
params.update(kwargs)
if user.is_authenticated:
params['uid'] = user.id # depends on [control=['if'], data=[]]
signal.send(request.url, **params) |
def build_binary_dist_helper(self, requirement, setup_command):
"""
Convert an unpacked source distribution to a binary distribution.
:param requirement: A :class:`.Requirement` object.
:param setup_command: A list of strings with the arguments to
``setup.py``.
:returns: The pathname of the resulting binary distribution (a string).
:raises: :exc:`.BuildFailed` when the build reports an error (e.g.
because of missing binary dependencies like system
libraries).
:raises: :exc:`.NoBuildOutput` when the build does not produce the
expected binary distribution archive.
"""
build_timer = Timer()
# Make sure the source distribution contains a setup script.
setup_script = os.path.join(requirement.source_directory, 'setup.py')
if not os.path.isfile(setup_script):
msg = "Directory %s (%s %s) doesn't contain a source distribution!"
raise InvalidSourceDistribution(msg % (requirement.source_directory, requirement.name, requirement.version))
# Let the user know what's going on.
build_text = "Building %s binary distribution" % requirement
logger.info("%s ..", build_text)
# Cleanup previously generated distributions.
dist_directory = os.path.join(requirement.source_directory, 'dist')
if os.path.isdir(dist_directory):
logger.debug("Cleaning up previously generated distributions in %s ..", dist_directory)
shutil.rmtree(dist_directory)
# Let the user know (approximately) which command is being executed
# (I don't think it's necessary to show them the nasty details :-).
logger.debug("Executing external command: %s",
' '.join(map(pipes.quote, [self.config.python_executable, 'setup.py'] + setup_command)))
# Compose the command line needed to build the binary distribution.
# This nasty command line forces the use of setuptools (instead of
# distutils) just like pip does. This will cause the `*.egg-info'
# metadata to be written to a directory instead of a file, which
# (amongst other things) enables tracking of installed files.
command_line = [
self.config.python_executable, '-c',
';'.join([
'import setuptools',
'__file__=%r' % setup_script,
r"exec(compile(open(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))",
])
] + setup_command
# Redirect all output of the build to a temporary file.
fd, temporary_file = tempfile.mkstemp()
try:
# Start the build.
build = subprocess.Popen(command_line, cwd=requirement.source_directory, stdout=fd, stderr=fd)
# Wait for the build to finish and provide feedback to the user in the mean time.
spinner = Spinner(label=build_text, timer=build_timer)
while build.poll() is None:
spinner.step()
# Don't tax the CPU too much.
time.sleep(0.2)
spinner.clear()
# Make sure the build succeeded and produced a binary distribution archive.
try:
# If the build reported an error we'll try to provide the user with
# some hints about what went wrong.
if build.returncode != 0:
raise BuildFailed("Failed to build {name} ({version}) binary distribution!",
name=requirement.name, version=requirement.version)
# Check if the build created the `dist' directory (the os.listdir()
# call below will raise an exception if we don't check for this).
if not os.path.isdir(dist_directory):
raise NoBuildOutput("Build of {name} ({version}) did not produce a binary distribution archive!",
name=requirement.name, version=requirement.version)
# Check if we can find the binary distribution archive.
filenames = os.listdir(dist_directory)
if len(filenames) != 1:
variables = dict(name=requirement.name,
version=requirement.version,
filenames=concatenate(sorted(filenames)))
raise NoBuildOutput("""
Build of {name} ({version}) produced more than one
distribution archive! (matches: {filenames})
""", **variables)
except Exception as e:
# Decorate the exception with the output of the failed build.
with open(temporary_file) as handle:
build_output = handle.read()
enhanced_message = compact("""
{message}
Please check the build output because it will probably
provide a hint about what went wrong.
Build output:
{output}
""", message=e.args[0], output=build_output.strip())
e.args = (enhanced_message,)
raise
logger.info("Finished building %s in %s.", requirement.name, build_timer)
return os.path.join(dist_directory, filenames[0])
finally:
# Close file descriptor before removing the temporary file.
# Without closing Windows is complaining that the file cannot
# be removed because it is used by another process.
os.close(fd)
os.unlink(temporary_file) | def function[build_binary_dist_helper, parameter[self, requirement, setup_command]]:
constant[
Convert an unpacked source distribution to a binary distribution.
:param requirement: A :class:`.Requirement` object.
:param setup_command: A list of strings with the arguments to
``setup.py``.
:returns: The pathname of the resulting binary distribution (a string).
:raises: :exc:`.BuildFailed` when the build reports an error (e.g.
because of missing binary dependencies like system
libraries).
:raises: :exc:`.NoBuildOutput` when the build does not produce the
expected binary distribution archive.
]
variable[build_timer] assign[=] call[name[Timer], parameter[]]
variable[setup_script] assign[=] call[name[os].path.join, parameter[name[requirement].source_directory, constant[setup.py]]]
if <ast.UnaryOp object at 0x7da1b0415d50> begin[:]
variable[msg] assign[=] constant[Directory %s (%s %s) doesn't contain a source distribution!]
<ast.Raise object at 0x7da1b0417850>
variable[build_text] assign[=] binary_operation[constant[Building %s binary distribution] <ast.Mod object at 0x7da2590d6920> name[requirement]]
call[name[logger].info, parameter[constant[%s ..], name[build_text]]]
variable[dist_directory] assign[=] call[name[os].path.join, parameter[name[requirement].source_directory, constant[dist]]]
if call[name[os].path.isdir, parameter[name[dist_directory]]] begin[:]
call[name[logger].debug, parameter[constant[Cleaning up previously generated distributions in %s ..], name[dist_directory]]]
call[name[shutil].rmtree, parameter[name[dist_directory]]]
call[name[logger].debug, parameter[constant[Executing external command: %s], call[constant[ ].join, parameter[call[name[map], parameter[name[pipes].quote, binary_operation[list[[<ast.Attribute object at 0x7da1b031ddb0>, <ast.Constant object at 0x7da1b031cee0>]] + name[setup_command]]]]]]]]
variable[command_line] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da1b031ec80>, <ast.Constant object at 0x7da1b031e9b0>, <ast.Call object at 0x7da1b031d7e0>]] + name[setup_command]]
<ast.Tuple object at 0x7da1b031d6c0> assign[=] call[name[tempfile].mkstemp, parameter[]]
<ast.Try object at 0x7da1b031c070> | keyword[def] identifier[build_binary_dist_helper] ( identifier[self] , identifier[requirement] , identifier[setup_command] ):
literal[string]
identifier[build_timer] = identifier[Timer] ()
identifier[setup_script] = identifier[os] . identifier[path] . identifier[join] ( identifier[requirement] . identifier[source_directory] , literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[setup_script] ):
identifier[msg] = literal[string]
keyword[raise] identifier[InvalidSourceDistribution] ( identifier[msg] %( identifier[requirement] . identifier[source_directory] , identifier[requirement] . identifier[name] , identifier[requirement] . identifier[version] ))
identifier[build_text] = literal[string] % identifier[requirement]
identifier[logger] . identifier[info] ( literal[string] , identifier[build_text] )
identifier[dist_directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[requirement] . identifier[source_directory] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[dist_directory] ):
identifier[logger] . identifier[debug] ( literal[string] , identifier[dist_directory] )
identifier[shutil] . identifier[rmtree] ( identifier[dist_directory] )
identifier[logger] . identifier[debug] ( literal[string] ,
literal[string] . identifier[join] ( identifier[map] ( identifier[pipes] . identifier[quote] ,[ identifier[self] . identifier[config] . identifier[python_executable] , literal[string] ]+ identifier[setup_command] )))
identifier[command_line] =[
identifier[self] . identifier[config] . identifier[python_executable] , literal[string] ,
literal[string] . identifier[join] ([
literal[string] ,
literal[string] % identifier[setup_script] ,
literal[string] ,
])
]+ identifier[setup_command]
identifier[fd] , identifier[temporary_file] = identifier[tempfile] . identifier[mkstemp] ()
keyword[try] :
identifier[build] = identifier[subprocess] . identifier[Popen] ( identifier[command_line] , identifier[cwd] = identifier[requirement] . identifier[source_directory] , identifier[stdout] = identifier[fd] , identifier[stderr] = identifier[fd] )
identifier[spinner] = identifier[Spinner] ( identifier[label] = identifier[build_text] , identifier[timer] = identifier[build_timer] )
keyword[while] identifier[build] . identifier[poll] () keyword[is] keyword[None] :
identifier[spinner] . identifier[step] ()
identifier[time] . identifier[sleep] ( literal[int] )
identifier[spinner] . identifier[clear] ()
keyword[try] :
keyword[if] identifier[build] . identifier[returncode] != literal[int] :
keyword[raise] identifier[BuildFailed] ( literal[string] ,
identifier[name] = identifier[requirement] . identifier[name] , identifier[version] = identifier[requirement] . identifier[version] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[dist_directory] ):
keyword[raise] identifier[NoBuildOutput] ( literal[string] ,
identifier[name] = identifier[requirement] . identifier[name] , identifier[version] = identifier[requirement] . identifier[version] )
identifier[filenames] = identifier[os] . identifier[listdir] ( identifier[dist_directory] )
keyword[if] identifier[len] ( identifier[filenames] )!= literal[int] :
identifier[variables] = identifier[dict] ( identifier[name] = identifier[requirement] . identifier[name] ,
identifier[version] = identifier[requirement] . identifier[version] ,
identifier[filenames] = identifier[concatenate] ( identifier[sorted] ( identifier[filenames] )))
keyword[raise] identifier[NoBuildOutput] ( literal[string] ,** identifier[variables] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[with] identifier[open] ( identifier[temporary_file] ) keyword[as] identifier[handle] :
identifier[build_output] = identifier[handle] . identifier[read] ()
identifier[enhanced_message] = identifier[compact] ( literal[string] , identifier[message] = identifier[e] . identifier[args] [ literal[int] ], identifier[output] = identifier[build_output] . identifier[strip] ())
identifier[e] . identifier[args] =( identifier[enhanced_message] ,)
keyword[raise]
identifier[logger] . identifier[info] ( literal[string] , identifier[requirement] . identifier[name] , identifier[build_timer] )
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[dist_directory] , identifier[filenames] [ literal[int] ])
keyword[finally] :
identifier[os] . identifier[close] ( identifier[fd] )
identifier[os] . identifier[unlink] ( identifier[temporary_file] ) | def build_binary_dist_helper(self, requirement, setup_command):
"""
Convert an unpacked source distribution to a binary distribution.
:param requirement: A :class:`.Requirement` object.
:param setup_command: A list of strings with the arguments to
``setup.py``.
:returns: The pathname of the resulting binary distribution (a string).
:raises: :exc:`.BuildFailed` when the build reports an error (e.g.
because of missing binary dependencies like system
libraries).
:raises: :exc:`.NoBuildOutput` when the build does not produce the
expected binary distribution archive.
"""
build_timer = Timer()
# Make sure the source distribution contains a setup script.
setup_script = os.path.join(requirement.source_directory, 'setup.py')
if not os.path.isfile(setup_script):
msg = "Directory %s (%s %s) doesn't contain a source distribution!"
raise InvalidSourceDistribution(msg % (requirement.source_directory, requirement.name, requirement.version)) # depends on [control=['if'], data=[]]
# Let the user know what's going on.
build_text = 'Building %s binary distribution' % requirement
logger.info('%s ..', build_text)
# Cleanup previously generated distributions.
dist_directory = os.path.join(requirement.source_directory, 'dist')
if os.path.isdir(dist_directory):
logger.debug('Cleaning up previously generated distributions in %s ..', dist_directory)
shutil.rmtree(dist_directory) # depends on [control=['if'], data=[]]
# Let the user know (approximately) which command is being executed
# (I don't think it's necessary to show them the nasty details :-).
logger.debug('Executing external command: %s', ' '.join(map(pipes.quote, [self.config.python_executable, 'setup.py'] + setup_command)))
# Compose the command line needed to build the binary distribution.
# This nasty command line forces the use of setuptools (instead of
# distutils) just like pip does. This will cause the `*.egg-info'
# metadata to be written to a directory instead of a file, which
# (amongst other things) enables tracking of installed files.
command_line = [self.config.python_executable, '-c', ';'.join(['import setuptools', '__file__=%r' % setup_script, "exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))"])] + setup_command
# Redirect all output of the build to a temporary file.
(fd, temporary_file) = tempfile.mkstemp()
try:
# Start the build.
build = subprocess.Popen(command_line, cwd=requirement.source_directory, stdout=fd, stderr=fd)
# Wait for the build to finish and provide feedback to the user in the mean time.
spinner = Spinner(label=build_text, timer=build_timer)
while build.poll() is None:
spinner.step()
# Don't tax the CPU too much.
time.sleep(0.2) # depends on [control=['while'], data=[]]
spinner.clear()
# Make sure the build succeeded and produced a binary distribution archive.
try:
# If the build reported an error we'll try to provide the user with
# some hints about what went wrong.
if build.returncode != 0:
raise BuildFailed('Failed to build {name} ({version}) binary distribution!', name=requirement.name, version=requirement.version) # depends on [control=['if'], data=[]]
# Check if the build created the `dist' directory (the os.listdir()
# call below will raise an exception if we don't check for this).
if not os.path.isdir(dist_directory):
raise NoBuildOutput('Build of {name} ({version}) did not produce a binary distribution archive!', name=requirement.name, version=requirement.version) # depends on [control=['if'], data=[]]
# Check if we can find the binary distribution archive.
filenames = os.listdir(dist_directory)
if len(filenames) != 1:
variables = dict(name=requirement.name, version=requirement.version, filenames=concatenate(sorted(filenames)))
raise NoBuildOutput('\n Build of {name} ({version}) produced more than one\n distribution archive! (matches: {filenames})\n ', **variables) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
# Decorate the exception with the output of the failed build.
with open(temporary_file) as handle:
build_output = handle.read() # depends on [control=['with'], data=['handle']]
enhanced_message = compact('\n {message}\n\n Please check the build output because it will probably\n provide a hint about what went wrong.\n\n Build output:\n\n {output}\n ', message=e.args[0], output=build_output.strip())
e.args = (enhanced_message,)
raise # depends on [control=['except'], data=['e']]
logger.info('Finished building %s in %s.', requirement.name, build_timer)
return os.path.join(dist_directory, filenames[0]) # depends on [control=['try'], data=[]]
finally:
# Close file descriptor before removing the temporary file.
# Without closing Windows is complaining that the file cannot
# be removed because it is used by another process.
os.close(fd)
os.unlink(temporary_file) |
def info():
    """Print details about the current application and return them.

    Looks up the active application, renders its info text to stdout
    and hands the same text back to the caller.

    Examples:
        $ dj info
        No application, try running dj init.
        $ dj info
        Application:
            foo @ 2.7.9
        Requirements:
            Django == 1.10
    """
    app = get_current_application()
    details = app.info()
    stdout.write(details)
    return details
return info | def function[info, parameter[]]:
constant[Display app info.
Examples:
$ dj info
No application, try running dj init.
$ dj info
Application:
foo @ 2.7.9
Requirements:
Django == 1.10
]
variable[application] assign[=] call[name[get_current_application], parameter[]]
variable[info] assign[=] call[name[application].info, parameter[]]
call[name[stdout].write, parameter[name[info]]]
return[name[info]] | keyword[def] identifier[info] ():
literal[string]
identifier[application] = identifier[get_current_application] ()
identifier[info] = identifier[application] . identifier[info] ()
identifier[stdout] . identifier[write] ( identifier[info] )
keyword[return] identifier[info] | def info():
"""Display app info.
Examples:
$ dj info
No application, try running dj init.
$ dj info
Application:
foo @ 2.7.9
Requirements:
Django == 1.10
"""
application = get_current_application()
info = application.info()
stdout.write(info)
return info |
def path_dirs(urls):
    '''
    Takes a StringCounter of normalized URL and parses them into
    a list of path directories. The file name is
    included in the path directory list.
    '''
    counts = StringCounter()
    for url, weight in urls.items():
        segments = urlparse(url).path.split('/')
        for segment in segments:
            # Empty segments come from leading/trailing/double slashes.
            if segment:
                counts[segment] += weight
    return counts
return path_dirs | def function[path_dirs, parameter[urls]]:
constant[
Takes a StringCounter of normalized URL and parses them into
a list of path directories. The file name is
included in the path directory list.
]
variable[path_dirs] assign[=] call[name[StringCounter], parameter[]]
for taget[name[url]] in starred[name[urls]] begin[:]
for taget[name[path_dir]] in starred[call[name[filter], parameter[constant[None], call[call[name[urlparse], parameter[name[url]]].path.split, parameter[constant[/]]]]]] begin[:]
<ast.AugAssign object at 0x7da18f00f970>
return[name[path_dirs]] | keyword[def] identifier[path_dirs] ( identifier[urls] ):
literal[string]
identifier[path_dirs] = identifier[StringCounter] ()
keyword[for] identifier[url] keyword[in] identifier[urls] :
keyword[for] identifier[path_dir] keyword[in] identifier[filter] ( keyword[None] , identifier[urlparse] ( identifier[url] ). identifier[path] . identifier[split] ( literal[string] )):
identifier[path_dirs] [ identifier[path_dir] ]+= identifier[urls] [ identifier[url] ]
keyword[return] identifier[path_dirs] | def path_dirs(urls):
"""
Takes a StringCounter of normalized URL and parses them into
a list of path directories. The file name is
included in the path directory list.
"""
path_dirs = StringCounter()
for url in urls:
for path_dir in filter(None, urlparse(url).path.split('/')):
path_dirs[path_dir] += urls[url] # depends on [control=['for'], data=['path_dir']] # depends on [control=['for'], data=['url']]
return path_dirs |
def _load_bot_config(index, config_bundle: BotConfigBundle,
                     looks_config_object: ConfigObject, overall_config: ConfigObject,
                     human_index_tracker: IncrementingInteger) -> PlayerConfig:
    """
    Builds the PlayerConfig for a single participant.
    :param index: This is the bot index (where it appears in game_cars)
    :param config_bundle: A config object for a single bot
    :param looks_config_object: A config object describing the bot's appearance
    :param overall_config: This is the config for the entire session not one particular bot
    :param human_index_tracker: An object of type IncrementingInteger that helps set human_index correctly.
    :return: A fully populated PlayerConfig for this participant.
    """
    player = PlayerConfig()
    player.config_path = config_bundle.config_path
    player.name = config_bundle.name
    team = get_team(overall_config, index)
    player.team = team
    # Determine what kind of participant this is (human vs. bot, rlbot-controlled or not).
    participant_type = overall_config.get(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TYPE_KEY, index)
    player.bot, player.rlbot_controlled = get_bot_options(participant_type)
    player.bot_skill = overall_config.getfloat(
        PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_BOT_SKILL_KEY, index)
    # Humans get a sequential index so input devices can be assigned correctly.
    if not player.bot:
        player.human_index = human_index_tracker.increment()
    player.loadout_config = load_bot_appearance(looks_config_object, team)
    return player
return bot_configuration | def function[_load_bot_config, parameter[index, config_bundle, looks_config_object, overall_config, human_index_tracker]]:
constant[
Loads the config data of a single bot
:param index: This is the bot index (where it appears in game_cars)
:param bot_configuration: A config object that will eventually be transformed and sent to the game.
:param config_bundle: A config object for a single bot
:param overall_config: This is the config for the entire session not one particular bot
:param human_index_tracker: An object of type HumanIndexManager that helps set human_index correctly.
:return:
]
variable[bot_configuration] assign[=] call[name[PlayerConfig], parameter[]]
name[bot_configuration].config_path assign[=] name[config_bundle].config_path
variable[team_num] assign[=] call[name[get_team], parameter[name[overall_config], name[index]]]
name[bot_configuration].team assign[=] name[team_num]
variable[bot_type] assign[=] call[name[overall_config].get, parameter[name[PARTICIPANT_CONFIGURATION_HEADER], name[PARTICIPANT_TYPE_KEY], name[index]]]
<ast.Tuple object at 0x7da18bc72ce0> assign[=] call[name[get_bot_options], parameter[name[bot_type]]]
name[bot_configuration].bot_skill assign[=] call[name[overall_config].getfloat, parameter[name[PARTICIPANT_CONFIGURATION_HEADER], name[PARTICIPANT_BOT_SKILL_KEY], name[index]]]
if <ast.UnaryOp object at 0x7da18bc72e90> begin[:]
name[bot_configuration].human_index assign[=] call[name[human_index_tracker].increment, parameter[]]
name[bot_configuration].name assign[=] name[config_bundle].name
variable[loadout_config] assign[=] call[name[load_bot_appearance], parameter[name[looks_config_object], name[team_num]]]
name[bot_configuration].loadout_config assign[=] name[loadout_config]
return[name[bot_configuration]] | keyword[def] identifier[_load_bot_config] ( identifier[index] , identifier[config_bundle] : identifier[BotConfigBundle] ,
identifier[looks_config_object] : identifier[ConfigObject] , identifier[overall_config] : identifier[ConfigObject] ,
identifier[human_index_tracker] : identifier[IncrementingInteger] )-> identifier[PlayerConfig] :
literal[string]
identifier[bot_configuration] = identifier[PlayerConfig] ()
identifier[bot_configuration] . identifier[config_path] = identifier[config_bundle] . identifier[config_path]
identifier[team_num] = identifier[get_team] ( identifier[overall_config] , identifier[index] )
identifier[bot_configuration] . identifier[team] = identifier[team_num]
identifier[bot_type] = identifier[overall_config] . identifier[get] ( identifier[PARTICIPANT_CONFIGURATION_HEADER] , identifier[PARTICIPANT_TYPE_KEY] , identifier[index] )
identifier[bot_configuration] . identifier[bot] , identifier[bot_configuration] . identifier[rlbot_controlled] = identifier[get_bot_options] ( identifier[bot_type] )
identifier[bot_configuration] . identifier[bot_skill] = identifier[overall_config] . identifier[getfloat] (
identifier[PARTICIPANT_CONFIGURATION_HEADER] , identifier[PARTICIPANT_BOT_SKILL_KEY] , identifier[index] )
keyword[if] keyword[not] identifier[bot_configuration] . identifier[bot] :
identifier[bot_configuration] . identifier[human_index] = identifier[human_index_tracker] . identifier[increment] ()
identifier[bot_configuration] . identifier[name] = identifier[config_bundle] . identifier[name]
identifier[loadout_config] = identifier[load_bot_appearance] ( identifier[looks_config_object] , identifier[team_num] )
identifier[bot_configuration] . identifier[loadout_config] = identifier[loadout_config]
keyword[return] identifier[bot_configuration] | def _load_bot_config(index, config_bundle: BotConfigBundle, looks_config_object: ConfigObject, overall_config: ConfigObject, human_index_tracker: IncrementingInteger) -> PlayerConfig:
"""
Loads the config data of a single bot
:param index: This is the bot index (where it appears in game_cars)
:param bot_configuration: A config object that will eventually be transformed and sent to the game.
:param config_bundle: A config object for a single bot
:param overall_config: This is the config for the entire session not one particular bot
:param human_index_tracker: An object of type HumanIndexManager that helps set human_index correctly.
:return:
"""
bot_configuration = PlayerConfig()
bot_configuration.config_path = config_bundle.config_path
team_num = get_team(overall_config, index)
bot_configuration.team = team_num
# Setting up data about what type of bot it is
bot_type = overall_config.get(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TYPE_KEY, index)
(bot_configuration.bot, bot_configuration.rlbot_controlled) = get_bot_options(bot_type)
bot_configuration.bot_skill = overall_config.getfloat(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_BOT_SKILL_KEY, index)
if not bot_configuration.bot:
bot_configuration.human_index = human_index_tracker.increment() # depends on [control=['if'], data=[]]
# Setting up the bots name
bot_configuration.name = config_bundle.name
loadout_config = load_bot_appearance(looks_config_object, team_num)
bot_configuration.loadout_config = loadout_config
return bot_configuration |
def dependent_hosted_number_orders(self):
    """
    Access the dependent_hosted_number_orders
    :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
    :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
    """
    # Lazily build the list resource on first access and cache it.
    if self._dependent_hosted_number_orders is None:
        orders = DependentHostedNumberOrderList(
            self._version,
            signing_document_sid=self._solution['sid'],
        )
        self._dependent_hosted_number_orders = orders
    return self._dependent_hosted_number_orders
return self._dependent_hosted_number_orders | def function[dependent_hosted_number_orders, parameter[self]]:
constant[
Access the dependent_hosted_number_orders
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
]
if compare[name[self]._dependent_hosted_number_orders is constant[None]] begin[:]
name[self]._dependent_hosted_number_orders assign[=] call[name[DependentHostedNumberOrderList], parameter[name[self]._version]]
return[name[self]._dependent_hosted_number_orders] | keyword[def] identifier[dependent_hosted_number_orders] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_dependent_hosted_number_orders] keyword[is] keyword[None] :
identifier[self] . identifier[_dependent_hosted_number_orders] = identifier[DependentHostedNumberOrderList] (
identifier[self] . identifier[_version] ,
identifier[signing_document_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
)
keyword[return] identifier[self] . identifier[_dependent_hosted_number_orders] | def dependent_hosted_number_orders(self):
"""
Access the dependent_hosted_number_orders
:returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
"""
if self._dependent_hosted_number_orders is None:
self._dependent_hosted_number_orders = DependentHostedNumberOrderList(self._version, signing_document_sid=self._solution['sid']) # depends on [control=['if'], data=[]]
return self._dependent_hosted_number_orders |
def decode(cls, key):
    """
    Decode a bucket key into a BucketKey instance.
    :param key: The string form of a bucket key.
    :returns: A suitable instance of BucketKey corresponding to
              the passed-in key.
    """
    # The key format is "<prefix>:<uuid>[/<name>=<value>...]"; the prefix
    # selects the key version.
    prefix, sep, param_str = key.partition(':')
    if sep != ':' or prefix not in cls._prefix_to_version:
        raise ValueError("%r is not a bucket key" % key)
    version = cls._prefix_to_version[prefix]

    # First '/'-separated element is the uuid; the rest are name=value pairs.
    uuid, *param_parts = param_str.split('/')
    params = {}
    for expr in param_parts:
        name, sep, value = expr.partition('=')
        if sep != '=':
            raise ValueError("Cannot interpret parameter expression %r" %
                             expr)
        params[name] = cls._decode(value)

    return cls(uuid, params, version=version)
return cls(uuid, params, version=version) | def function[decode, parameter[cls, key]]:
constant[
Decode a bucket key into a BucketKey instance.
:param key: The string form of a bucket key.
:returns: A suitable instance of BucketKey corresponding to
the passed-in key.
]
<ast.Tuple object at 0x7da1b26aef20> assign[=] call[name[key].partition, parameter[constant[:]]]
if <ast.BoolOp object at 0x7da1b26ae3e0> begin[:]
<ast.Raise object at 0x7da1b26ad2d0>
variable[version] assign[=] call[name[cls]._prefix_to_version][name[prefix]]
variable[parts] assign[=] call[name[param_str].split, parameter[constant[/]]]
variable[uuid] assign[=] call[name[parts].pop, parameter[constant[0]]]
variable[params] assign[=] dictionary[[], []]
for taget[name[part]] in starred[name[parts]] begin[:]
<ast.Tuple object at 0x7da20e9b0d00> assign[=] call[name[part].partition, parameter[constant[=]]]
if compare[name[sep] not_equal[!=] constant[=]] begin[:]
<ast.Raise object at 0x7da20e9b23e0>
call[name[params]][name[name]] assign[=] call[name[cls]._decode, parameter[name[value]]]
return[call[name[cls], parameter[name[uuid], name[params]]]] | keyword[def] identifier[decode] ( identifier[cls] , identifier[key] ):
literal[string]
identifier[prefix] , identifier[sep] , identifier[param_str] = identifier[key] . identifier[partition] ( literal[string] )
keyword[if] identifier[sep] != literal[string] keyword[or] identifier[prefix] keyword[not] keyword[in] identifier[cls] . identifier[_prefix_to_version] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[key] )
identifier[version] = identifier[cls] . identifier[_prefix_to_version] [ identifier[prefix] ]
identifier[parts] = identifier[param_str] . identifier[split] ( literal[string] )
identifier[uuid] = identifier[parts] . identifier[pop] ( literal[int] )
identifier[params] ={}
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[name] , identifier[sep] , identifier[value] = identifier[part] . identifier[partition] ( literal[string] )
keyword[if] identifier[sep] != literal[string] :
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[part] )
identifier[params] [ identifier[name] ]= identifier[cls] . identifier[_decode] ( identifier[value] )
keyword[return] identifier[cls] ( identifier[uuid] , identifier[params] , identifier[version] = identifier[version] ) | def decode(cls, key):
"""
Decode a bucket key into a BucketKey instance.
:param key: The string form of a bucket key.
:returns: A suitable instance of BucketKey corresponding to
the passed-in key.
"""
# Determine bucket key version
(prefix, sep, param_str) = key.partition(':')
if sep != ':' or prefix not in cls._prefix_to_version:
raise ValueError('%r is not a bucket key' % key) # depends on [control=['if'], data=[]]
version = cls._prefix_to_version[prefix]
# Take the parameters apart...
parts = param_str.split('/')
uuid = parts.pop(0)
params = {}
for part in parts:
(name, sep, value) = part.partition('=')
# Make sure it's well-formed
if sep != '=':
raise ValueError('Cannot interpret parameter expression %r' % part) # depends on [control=['if'], data=[]]
params[name] = cls._decode(value) # depends on [control=['for'], data=['part']]
# Return a BucketKey
return cls(uuid, params, version=version) |
async def handle_client_hello(self, client_addr, _: ClientHello):
    """Register a newly connected client and push the available containers to it."""
    self._logger.info("New client connected %s", client_addr)
    self._registered_clients.add(client_addr)
    # The update helper takes a list of recipients; send only to this client.
    recipients = [client_addr]
    await self.send_container_update_to_client(recipients)
literal[string]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[client_addr] )
identifier[self] . identifier[_registered_clients] . identifier[add] ( identifier[client_addr] )
keyword[await] identifier[self] . identifier[send_container_update_to_client] ([ identifier[client_addr] ]) | async def handle_client_hello(self, client_addr, _: ClientHello):
""" Handle an ClientHello message. Send available containers to the client """
self._logger.info('New client connected %s', client_addr)
self._registered_clients.add(client_addr)
await self.send_container_update_to_client([client_addr]) |
def process_entry(self, entry):
    '''Construct a Post from a feedparser entry (FeedParserDict) and
        save or update it in the db.

        Returns one of the module-level entry status codes:
        ENTRY_NEW, ENTRY_UPDATED or ENTRY_SAME.'''
    # Imported lazily; presumably to avoid a module-level import cycle — TODO confirm.
    from feedjack.models import Post, Tag
    ## Construct a Post object from feedparser entry (FeedParserDict)
    post = Post(feed=self.feed)
    post.link = entry.get('link', self.feed.link)
    post.title = entry.get('title', post.link)
    post.guid = self._get_guid(entry)
    # Prefer the structured author_detail dict, then fall back to the flat
    # author/creator keys, then to a placeholder email.
    if 'author_detail' in entry:
        post.author = entry.author_detail.get('name', '')
        post.author_email = entry.author_detail.get('email', '')
    if not post.author: post.author = entry.get('author', entry.get('creator', ''))
    if not post.author_email: post.author_email = 'nospam@nospam.com'
    try: post.content = entry.content[0].value
    except: post.content = entry.get('summary', entry.get('description', ''))  # bare except: any shape mismatch falls back to summary/description
    # Try to get the post date from "updated" then "published" then "created"
    ts_parsed = ts_raw = None  # NOTE(review): assigned but never used below — dead code?
    for k in self.post_timestamp_keys:
        try:
            post.date_modified = get_modified_date(
                entry.get('{0}_parsed'.format(k)), entry.get(k) )
        except ValueError as err:
            log.warn( 'Failed to process post timestamp:'
                ' {0} (feed_id: {1}, post_guid: {2})'.format(err, self.feed.id, post.guid) )
        # First timestamp key that yields a date wins.
        if post.date_modified: break
    post.comments = entry.get('comments', '')
    # Merge plain enclosures with entries from the media_content extension.
    enclosures = entry.get('enclosures', list())
    if 'media_content' in entry:
        for mc in entry.media_content:
            if 'url' in mc: e = dict(href=mc['url'], medium=mc.get('medium', 'image'))
            else: e = entry.media_content  # NOTE(review): assigns the whole media_content list when 'url' is missing — confirm this is intended
            e['type'] = 'application/x-media-content' # special ct for these things
            enclosures.append(e)
        assert enclosures, enclosures
    post.enclosures = enclosures
    ## Get a list of tag objects from an entry
    # Note that these objects can't go into m2m field until properly saved
    fcat = list()
    if entry.has_key('tags'):  # Python 2 idiom (see also it.imap below)
        for tcat in entry.tags:
            qcat = tcat.label if tcat.label is not None else tcat.term
            if not qcat: continue
            qcat = qcat.strip()
            # Split compound tags like "a,b" or "a/b" into separate names.
            if ',' in qcat or '/' in qcat: qcat = qcat.replace(',', '/').split('/')
            else: qcat = [qcat]
            for zcat in qcat:
                # Normalize whitespace/case and clip to the Tag.name column size.
                tagname = ' '.join(zcat.lower().split()).strip()[:255]
                if not tagname: continue
                # Get-or-create done as two separate queries.
                if not Tag.objects.filter(name=tagname):
                    cobj = Tag(name=tagname)
                    cobj.save()
                fcat.append(Tag.objects.get(name=tagname))
    ## Some feedback
    post_base_fields = 'title link guid author author_email'.split()
    log.debug('[{0}] Entry\n{1}'.format(self.feed.id, '\n'.join(
        [' {0}: {1}'.format(key, getattr(post, key)) for key in post_base_fields]
        + ['tags: {0}'.format(' '.join(it.imap(op.attrgetter('name'), fcat)))] )))
    ## Store / update a post
    if post.guid in self.postdict: # post exists, update if it was modified (and feed is mutable)
        post_old = self.postdict[post.guid]
        changed = post_old.content != post.content or (
            post.date_modified and post_old.date_modified != post.date_modified )
        if not self.feed.immutable and changed:
            retval = ENTRY_UPDATED
            log.extra('[{0}] Updating existing post: {1}'.format(self.feed.id, post.link))
            # Update fields
            for field in post_base_fields + ['content', 'comments']:
                setattr(post_old, field, getattr(post, field))
            # Keep the old timestamp if the entry did not provide a new one.
            post_old.date_modified = post.date_modified or post_old.date_modified
            # Update tags
            post_old.tags.clear()
            for tcat in fcat: post_old.tags.add(tcat)
            post_old.save()
        else:
            retval = ENTRY_SAME
            log.extra( ( '[{0}] Post has not changed: {1}' if not changed else
                '[{0}] Post changed, but feed is marked as immutable: {1}' )\
                .format(self.feed.id, post.link) )
    else: # new post, store it into database
        retval = ENTRY_NEW
        log.extra( '[{0}] Saving new post: {1} (timestamp: {2})'\
            .format(self.feed.id, post.guid, post.date_modified) )
        # Try hard to set date_modified: feed.modified, http.modified and now() as a last resort
        if not post.date_modified and self.fpf:
            try:
                post.date_modified = get_modified_date(
                    self.fpf.feed.get('modified_parsed') or self.fpf.get('modified_parsed'),
                    self.fpf.feed.get('modified') or self.fpf.get('modified') )
            except ValueError as err:
                log.warn(( 'Failed to process feed/http timestamp: {0} (feed_id: {1},'
                    ' post_guid: {2}), falling back to "now"' ).format(err, self.feed.id, post.guid))
        if not post.date_modified:
            post.date_modified = timezone.now()
            log.debug(( '[{0}] Using current time for post'
                ' ({1}) timestamp' ).format(self.feed.id, post.guid))
        else:
            log.debug(
                '[{0}] Using timestamp from feed/http for post ({1}): {2}'\
                .format(self.feed.id, post.guid, post.date_modified) )
    if self.options.hidden: post.hidden = True
    try: post.save()
    except IntegrityError:
        log.error( 'IntegrityError while saving (supposedly) new'\
            ' post with guid: {0.guid}, link: {0.link}, title: {0.title}'.format(post) )
        raise
    # Tags can only be attached once the post has a primary key (saved above).
    for tcat in fcat: post.tags.add(tcat)
    self.postdict[post.guid] = post
    return retval
constant[Construct a Post from a feedparser entry and save/update it in db]
from relative_module[feedjack.models] import module[Post], module[Tag]
variable[post] assign[=] call[name[Post], parameter[]]
name[post].link assign[=] call[name[entry].get, parameter[constant[link], name[self].feed.link]]
name[post].title assign[=] call[name[entry].get, parameter[constant[title], name[post].link]]
name[post].guid assign[=] call[name[self]._get_guid, parameter[name[entry]]]
if compare[constant[author_detail] in name[entry]] begin[:]
name[post].author assign[=] call[name[entry].author_detail.get, parameter[constant[name], constant[]]]
name[post].author_email assign[=] call[name[entry].author_detail.get, parameter[constant[email], constant[]]]
if <ast.UnaryOp object at 0x7da1b0a37cd0> begin[:]
name[post].author assign[=] call[name[entry].get, parameter[constant[author], call[name[entry].get, parameter[constant[creator], constant[]]]]]
if <ast.UnaryOp object at 0x7da1b0a22110> begin[:]
name[post].author_email assign[=] constant[nospam@nospam.com]
<ast.Try object at 0x7da1b0a22f20>
variable[ts_parsed] assign[=] constant[None]
for taget[name[k]] in starred[name[self].post_timestamp_keys] begin[:]
<ast.Try object at 0x7da1b0a229e0>
if name[post].date_modified begin[:]
break
name[post].comments assign[=] call[name[entry].get, parameter[constant[comments], constant[]]]
variable[enclosures] assign[=] call[name[entry].get, parameter[constant[enclosures], call[name[list], parameter[]]]]
if compare[constant[media_content] in name[entry]] begin[:]
for taget[name[mc]] in starred[name[entry].media_content] begin[:]
if compare[constant[url] in name[mc]] begin[:]
variable[e] assign[=] call[name[dict], parameter[]]
call[name[e]][constant[type]] assign[=] constant[application/x-media-content]
call[name[enclosures].append, parameter[name[e]]]
assert[name[enclosures]]
name[post].enclosures assign[=] name[enclosures]
variable[fcat] assign[=] call[name[list], parameter[]]
if call[name[entry].has_key, parameter[constant[tags]]] begin[:]
for taget[name[tcat]] in starred[name[entry].tags] begin[:]
variable[qcat] assign[=] <ast.IfExp object at 0x7da18f7217e0>
if <ast.UnaryOp object at 0x7da18f722d40> begin[:]
continue
variable[qcat] assign[=] call[name[qcat].strip, parameter[]]
if <ast.BoolOp object at 0x7da18f720a90> begin[:]
variable[qcat] assign[=] call[call[name[qcat].replace, parameter[constant[,], constant[/]]].split, parameter[constant[/]]]
for taget[name[zcat]] in starred[name[qcat]] begin[:]
variable[tagname] assign[=] call[call[call[constant[ ].join, parameter[call[call[name[zcat].lower, parameter[]].split, parameter[]]]].strip, parameter[]]][<ast.Slice object at 0x7da2041dae00>]
if <ast.UnaryOp object at 0x7da2041d86d0> begin[:]
continue
if <ast.UnaryOp object at 0x7da2041d99c0> begin[:]
variable[cobj] assign[=] call[name[Tag], parameter[]]
call[name[cobj].save, parameter[]]
call[name[fcat].append, parameter[call[name[Tag].objects.get, parameter[]]]]
variable[post_base_fields] assign[=] call[constant[title link guid author author_email].split, parameter[]]
call[name[log].debug, parameter[call[constant[[{0}] Entry
{1}].format, parameter[name[self].feed.id, call[constant[
].join, parameter[binary_operation[<ast.ListComp object at 0x7da2041da9e0> + list[[<ast.Call object at 0x7da2041d97b0>]]]]]]]]]
if compare[name[post].guid in name[self].postdict] begin[:]
variable[post_old] assign[=] call[name[self].postdict][name[post].guid]
variable[changed] assign[=] <ast.BoolOp object at 0x7da2041d9870>
if <ast.BoolOp object at 0x7da2041d8e50> begin[:]
variable[retval] assign[=] name[ENTRY_UPDATED]
call[name[log].extra, parameter[call[constant[[{0}] Updating existing post: {1}].format, parameter[name[self].feed.id, name[post].link]]]]
for taget[name[field]] in starred[binary_operation[name[post_base_fields] + list[[<ast.Constant object at 0x7da2041db0a0>, <ast.Constant object at 0x7da2041d89a0>]]]] begin[:]
call[name[setattr], parameter[name[post_old], name[field], call[name[getattr], parameter[name[post], name[field]]]]]
name[post_old].date_modified assign[=] <ast.BoolOp object at 0x7da2041d8700>
call[name[post_old].tags.clear, parameter[]]
for taget[name[tcat]] in starred[name[fcat]] begin[:]
call[name[post_old].tags.add, parameter[name[tcat]]]
call[name[post_old].save, parameter[]]
return[name[retval]] | keyword[def] identifier[process_entry] ( identifier[self] , identifier[entry] ):
literal[string]
keyword[from] identifier[feedjack] . identifier[models] keyword[import] identifier[Post] , identifier[Tag]
identifier[post] = identifier[Post] ( identifier[feed] = identifier[self] . identifier[feed] )
identifier[post] . identifier[link] = identifier[entry] . identifier[get] ( literal[string] , identifier[self] . identifier[feed] . identifier[link] )
identifier[post] . identifier[title] = identifier[entry] . identifier[get] ( literal[string] , identifier[post] . identifier[link] )
identifier[post] . identifier[guid] = identifier[self] . identifier[_get_guid] ( identifier[entry] )
keyword[if] literal[string] keyword[in] identifier[entry] :
identifier[post] . identifier[author] = identifier[entry] . identifier[author_detail] . identifier[get] ( literal[string] , literal[string] )
identifier[post] . identifier[author_email] = identifier[entry] . identifier[author_detail] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[post] . identifier[author] : identifier[post] . identifier[author] = identifier[entry] . identifier[get] ( literal[string] , identifier[entry] . identifier[get] ( literal[string] , literal[string] ))
keyword[if] keyword[not] identifier[post] . identifier[author_email] : identifier[post] . identifier[author_email] = literal[string]
keyword[try] : identifier[post] . identifier[content] = identifier[entry] . identifier[content] [ literal[int] ]. identifier[value]
keyword[except] : identifier[post] . identifier[content] = identifier[entry] . identifier[get] ( literal[string] , identifier[entry] . identifier[get] ( literal[string] , literal[string] ))
identifier[ts_parsed] = identifier[ts_raw] = keyword[None]
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[post_timestamp_keys] :
keyword[try] :
identifier[post] . identifier[date_modified] = identifier[get_modified_date] (
identifier[entry] . identifier[get] ( literal[string] . identifier[format] ( identifier[k] )), identifier[entry] . identifier[get] ( identifier[k] ))
keyword[except] identifier[ValueError] keyword[as] identifier[err] :
identifier[log] . identifier[warn] ( literal[string]
literal[string] . identifier[format] ( identifier[err] , identifier[self] . identifier[feed] . identifier[id] , identifier[post] . identifier[guid] ))
keyword[if] identifier[post] . identifier[date_modified] : keyword[break]
identifier[post] . identifier[comments] = identifier[entry] . identifier[get] ( literal[string] , literal[string] )
identifier[enclosures] = identifier[entry] . identifier[get] ( literal[string] , identifier[list] ())
keyword[if] literal[string] keyword[in] identifier[entry] :
keyword[for] identifier[mc] keyword[in] identifier[entry] . identifier[media_content] :
keyword[if] literal[string] keyword[in] identifier[mc] : identifier[e] = identifier[dict] ( identifier[href] = identifier[mc] [ literal[string] ], identifier[medium] = identifier[mc] . identifier[get] ( literal[string] , literal[string] ))
keyword[else] : identifier[e] = identifier[entry] . identifier[media_content]
identifier[e] [ literal[string] ]= literal[string]
identifier[enclosures] . identifier[append] ( identifier[e] )
keyword[assert] identifier[enclosures] , identifier[enclosures]
identifier[post] . identifier[enclosures] = identifier[enclosures]
identifier[fcat] = identifier[list] ()
keyword[if] identifier[entry] . identifier[has_key] ( literal[string] ):
keyword[for] identifier[tcat] keyword[in] identifier[entry] . identifier[tags] :
identifier[qcat] = identifier[tcat] . identifier[label] keyword[if] identifier[tcat] . identifier[label] keyword[is] keyword[not] keyword[None] keyword[else] identifier[tcat] . identifier[term]
keyword[if] keyword[not] identifier[qcat] : keyword[continue]
identifier[qcat] = identifier[qcat] . identifier[strip] ()
keyword[if] literal[string] keyword[in] identifier[qcat] keyword[or] literal[string] keyword[in] identifier[qcat] : identifier[qcat] = identifier[qcat] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
keyword[else] : identifier[qcat] =[ identifier[qcat] ]
keyword[for] identifier[zcat] keyword[in] identifier[qcat] :
identifier[tagname] = literal[string] . identifier[join] ( identifier[zcat] . identifier[lower] (). identifier[split] ()). identifier[strip] ()[: literal[int] ]
keyword[if] keyword[not] identifier[tagname] : keyword[continue]
keyword[if] keyword[not] identifier[Tag] . identifier[objects] . identifier[filter] ( identifier[name] = identifier[tagname] ):
identifier[cobj] = identifier[Tag] ( identifier[name] = identifier[tagname] )
identifier[cobj] . identifier[save] ()
identifier[fcat] . identifier[append] ( identifier[Tag] . identifier[objects] . identifier[get] ( identifier[name] = identifier[tagname] ))
identifier[post_base_fields] = literal[string] . identifier[split] ()
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[feed] . identifier[id] , literal[string] . identifier[join] (
[ literal[string] . identifier[format] ( identifier[key] , identifier[getattr] ( identifier[post] , identifier[key] )) keyword[for] identifier[key] keyword[in] identifier[post_base_fields] ]
+[ literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[it] . identifier[imap] ( identifier[op] . identifier[attrgetter] ( literal[string] ), identifier[fcat] )))])))
keyword[if] identifier[post] . identifier[guid] keyword[in] identifier[self] . identifier[postdict] :
identifier[post_old] = identifier[self] . identifier[postdict] [ identifier[post] . identifier[guid] ]
identifier[changed] = identifier[post_old] . identifier[content] != identifier[post] . identifier[content] keyword[or] (
identifier[post] . identifier[date_modified] keyword[and] identifier[post_old] . identifier[date_modified] != identifier[post] . identifier[date_modified] )
keyword[if] keyword[not] identifier[self] . identifier[feed] . identifier[immutable] keyword[and] identifier[changed] :
identifier[retval] = identifier[ENTRY_UPDATED]
identifier[log] . identifier[extra] ( literal[string] . identifier[format] ( identifier[self] . identifier[feed] . identifier[id] , identifier[post] . identifier[link] ))
keyword[for] identifier[field] keyword[in] identifier[post_base_fields] +[ literal[string] , literal[string] ]:
identifier[setattr] ( identifier[post_old] , identifier[field] , identifier[getattr] ( identifier[post] , identifier[field] ))
identifier[post_old] . identifier[date_modified] = identifier[post] . identifier[date_modified] keyword[or] identifier[post_old] . identifier[date_modified]
identifier[post_old] . identifier[tags] . identifier[clear] ()
keyword[for] identifier[tcat] keyword[in] identifier[fcat] : identifier[post_old] . identifier[tags] . identifier[add] ( identifier[tcat] )
identifier[post_old] . identifier[save] ()
keyword[else] :
identifier[retval] = identifier[ENTRY_SAME]
identifier[log] . identifier[extra] (( literal[string] keyword[if] keyword[not] identifier[changed] keyword[else]
literal[string] ). identifier[format] ( identifier[self] . identifier[feed] . identifier[id] , identifier[post] . identifier[link] ))
keyword[else] :
identifier[retval] = identifier[ENTRY_NEW]
identifier[log] . identifier[extra] ( literal[string] . identifier[format] ( identifier[self] . identifier[feed] . identifier[id] , identifier[post] . identifier[guid] , identifier[post] . identifier[date_modified] ))
keyword[if] keyword[not] identifier[post] . identifier[date_modified] keyword[and] identifier[self] . identifier[fpf] :
keyword[try] :
identifier[post] . identifier[date_modified] = identifier[get_modified_date] (
identifier[self] . identifier[fpf] . identifier[feed] . identifier[get] ( literal[string] ) keyword[or] identifier[self] . identifier[fpf] . identifier[get] ( literal[string] ),
identifier[self] . identifier[fpf] . identifier[feed] . identifier[get] ( literal[string] ) keyword[or] identifier[self] . identifier[fpf] . identifier[get] ( literal[string] ))
keyword[except] identifier[ValueError] keyword[as] identifier[err] :
identifier[log] . identifier[warn] (( literal[string]
literal[string] ). identifier[format] ( identifier[err] , identifier[self] . identifier[feed] . identifier[id] , identifier[post] . identifier[guid] ))
keyword[if] keyword[not] identifier[post] . identifier[date_modified] :
identifier[post] . identifier[date_modified] = identifier[timezone] . identifier[now] ()
identifier[log] . identifier[debug] (( literal[string]
literal[string] ). identifier[format] ( identifier[self] . identifier[feed] . identifier[id] , identifier[post] . identifier[guid] ))
keyword[else] :
identifier[log] . identifier[debug] (
literal[string] . identifier[format] ( identifier[self] . identifier[feed] . identifier[id] , identifier[post] . identifier[guid] , identifier[post] . identifier[date_modified] ))
keyword[if] identifier[self] . identifier[options] . identifier[hidden] : identifier[post] . identifier[hidden] = keyword[True]
keyword[try] : identifier[post] . identifier[save] ()
keyword[except] identifier[IntegrityError] :
identifier[log] . identifier[error] ( literal[string] literal[string] . identifier[format] ( identifier[post] ))
keyword[raise]
keyword[for] identifier[tcat] keyword[in] identifier[fcat] : identifier[post] . identifier[tags] . identifier[add] ( identifier[tcat] )
identifier[self] . identifier[postdict] [ identifier[post] . identifier[guid] ]= identifier[post]
keyword[return] identifier[retval] | def process_entry(self, entry):
"""Construct a Post from a feedparser entry and save/update it in db"""
from feedjack.models import Post, Tag ## Construct a Post object from feedparser entry (FeedParserDict)
post = Post(feed=self.feed)
post.link = entry.get('link', self.feed.link)
post.title = entry.get('title', post.link)
post.guid = self._get_guid(entry)
if 'author_detail' in entry:
post.author = entry.author_detail.get('name', '')
post.author_email = entry.author_detail.get('email', '') # depends on [control=['if'], data=['entry']]
if not post.author:
post.author = entry.get('author', entry.get('creator', '')) # depends on [control=['if'], data=[]]
if not post.author_email:
post.author_email = 'nospam@nospam.com' # depends on [control=['if'], data=[]]
try:
post.content = entry.content[0].value # depends on [control=['try'], data=[]]
except:
post.content = entry.get('summary', entry.get('description', '')) # depends on [control=['except'], data=[]] # Try to get the post date from "updated" then "published" then "created"
ts_parsed = ts_raw = None
for k in self.post_timestamp_keys:
try:
post.date_modified = get_modified_date(entry.get('{0}_parsed'.format(k)), entry.get(k)) # depends on [control=['try'], data=[]]
except ValueError as err:
log.warn('Failed to process post timestamp: {0} (feed_id: {1}, post_guid: {2})'.format(err, self.feed.id, post.guid)) # depends on [control=['except'], data=['err']]
if post.date_modified:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
post.comments = entry.get('comments', '')
enclosures = entry.get('enclosures', list())
if 'media_content' in entry:
for mc in entry.media_content:
if 'url' in mc:
e = dict(href=mc['url'], medium=mc.get('medium', 'image')) # depends on [control=['if'], data=['mc']]
else:
e = entry.media_content
e['type'] = 'application/x-media-content' # special ct for these things
enclosures.append(e) # depends on [control=['for'], data=['mc']]
assert enclosures, enclosures # depends on [control=['if'], data=['entry']]
post.enclosures = enclosures ## Get a list of tag objects from an entry
# Note that these objects can't go into m2m field until properly saved
fcat = list()
if entry.has_key('tags'):
for tcat in entry.tags:
qcat = tcat.label if tcat.label is not None else tcat.term
if not qcat:
continue # depends on [control=['if'], data=[]]
qcat = qcat.strip()
if ',' in qcat or '/' in qcat:
qcat = qcat.replace(',', '/').split('/') # depends on [control=['if'], data=[]]
else:
qcat = [qcat]
for zcat in qcat:
tagname = ' '.join(zcat.lower().split()).strip()[:255]
if not tagname:
continue # depends on [control=['if'], data=[]]
if not Tag.objects.filter(name=tagname):
cobj = Tag(name=tagname)
cobj.save() # depends on [control=['if'], data=[]]
fcat.append(Tag.objects.get(name=tagname)) # depends on [control=['for'], data=['zcat']] # depends on [control=['for'], data=['tcat']] # depends on [control=['if'], data=[]] ## Some feedback
post_base_fields = 'title link guid author author_email'.split()
log.debug('[{0}] Entry\n{1}'.format(self.feed.id, '\n'.join([' {0}: {1}'.format(key, getattr(post, key)) for key in post_base_fields] + ['tags: {0}'.format(' '.join(it.imap(op.attrgetter('name'), fcat)))]))) ## Store / update a post
if post.guid in self.postdict: # post exists, update if it was modified (and feed is mutable)
post_old = self.postdict[post.guid]
changed = post_old.content != post.content or (post.date_modified and post_old.date_modified != post.date_modified)
if not self.feed.immutable and changed:
retval = ENTRY_UPDATED
log.extra('[{0}] Updating existing post: {1}'.format(self.feed.id, post.link)) # Update fields
for field in post_base_fields + ['content', 'comments']:
setattr(post_old, field, getattr(post, field)) # depends on [control=['for'], data=['field']]
post_old.date_modified = post.date_modified or post_old.date_modified # Update tags
post_old.tags.clear()
for tcat in fcat:
post_old.tags.add(tcat) # depends on [control=['for'], data=['tcat']]
post_old.save() # depends on [control=['if'], data=[]]
else:
retval = ENTRY_SAME
log.extra(('[{0}] Post has not changed: {1}' if not changed else '[{0}] Post changed, but feed is marked as immutable: {1}').format(self.feed.id, post.link)) # depends on [control=['if'], data=[]]
else: # new post, store it into database
retval = ENTRY_NEW
log.extra('[{0}] Saving new post: {1} (timestamp: {2})'.format(self.feed.id, post.guid, post.date_modified)) # Try hard to set date_modified: feed.modified, http.modified and now() as a last resort
if not post.date_modified and self.fpf:
try:
post.date_modified = get_modified_date(self.fpf.feed.get('modified_parsed') or self.fpf.get('modified_parsed'), self.fpf.feed.get('modified') or self.fpf.get('modified')) # depends on [control=['try'], data=[]]
except ValueError as err:
log.warn('Failed to process feed/http timestamp: {0} (feed_id: {1}, post_guid: {2}), falling back to "now"'.format(err, self.feed.id, post.guid)) # depends on [control=['except'], data=['err']]
if not post.date_modified:
post.date_modified = timezone.now()
log.debug('[{0}] Using current time for post ({1}) timestamp'.format(self.feed.id, post.guid)) # depends on [control=['if'], data=[]]
else:
log.debug('[{0}] Using timestamp from feed/http for post ({1}): {2}'.format(self.feed.id, post.guid, post.date_modified)) # depends on [control=['if'], data=[]]
if self.options.hidden:
post.hidden = True # depends on [control=['if'], data=[]]
try:
post.save() # depends on [control=['try'], data=[]]
except IntegrityError:
log.error('IntegrityError while saving (supposedly) new post with guid: {0.guid}, link: {0.link}, title: {0.title}'.format(post))
raise # depends on [control=['except'], data=[]]
for tcat in fcat:
post.tags.add(tcat) # depends on [control=['for'], data=['tcat']]
self.postdict[post.guid] = post
return retval |
def vert_quality(script, min_quality=0.0, max_quality=0.05, inclusive=True):
    """Select all faces and vertices whose vertex quality lies in a range.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        min_quality (float): Minimum acceptable quality value.
        max_quality (float): Maximum acceptable quality value.
        inclusive (bool): If True only the faces with ALL of their vertices
            inside the specified range are selected. Otherwise any face with
            at least one vertex within the range is selected.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # MeshLab's dynamic-float sliders for both quality bounds span
    # [0, 2 * max_quality]; both Param elements share this upper cap.
    slider_cap = 2 * max_quality
    dynamic_float = ('    <Param name="{name}" value="{value}" '
                     'description="{desc}" min="0" max="{cap}" '
                     'type="RichDynamicFloat" />\n')
    filter_xml = ''.join([
        ' <filter name="Select by Vertex Quality">\n',
        dynamic_float.format(name='minQ', value=min_quality,
                             desc='Min Quality', cap=slider_cap),
        dynamic_float.format(name='maxQ', value=max_quality,
                             desc='Max Quality', cap=slider_cap),
        '    <Param name="Inclusive" value="{}" description="Inclusive Sel." '
        'type="RichBool" />\n'.format(str(inclusive).lower()),
        '  </filter>\n',
    ])
    util.write_filter(script, filter_xml)
    return None
constant[ Select all the faces and vertexes within the specified vertex quality
range.
Args:
script: the FilterScript object or script filename to write
the filter] to.
min_quality (float): Minimum acceptable quality value.
max_quality (float): Maximum acceptable quality value.
inclusive (bool): If True only the faces with ALL the vertices within
the specified range are selected. Otherwise any face with at least
one vertex within the range is selected.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
]
variable[filter_xml] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da18dc05b40>, <ast.Constant object at 0x7da18dc046d0>, <ast.Call object at 0x7da18dc05990>, <ast.Constant object at 0x7da18dc04c10>, <ast.Constant object at 0x7da18dc055d0>, <ast.Call object at 0x7da18dc05fc0>, <ast.Constant object at 0x7da18dc07ac0>, <ast.Constant object at 0x7da18dc06d70>, <ast.Constant object at 0x7da18dc056f0>, <ast.Call object at 0x7da18dc063b0>, <ast.Constant object at 0x7da18dc05e40>, <ast.Constant object at 0x7da18dc05060>, <ast.Call object at 0x7da18dc053c0>, <ast.Constant object at 0x7da18dc06c50>, <ast.Constant object at 0x7da18dc06e30>, <ast.Constant object at 0x7da18dc04b20>, <ast.Call object at 0x7da18dc06680>, <ast.Constant object at 0x7da18dc06bf0>, <ast.Constant object at 0x7da18dc07fd0>, <ast.Constant object at 0x7da18dc06b60>, <ast.Constant object at 0x7da18dc06b00>]]]]
call[name[util].write_filter, parameter[name[script], name[filter_xml]]]
return[constant[None]] | keyword[def] identifier[vert_quality] ( identifier[script] , identifier[min_quality] = literal[int] , identifier[max_quality] = literal[int] , identifier[inclusive] = keyword[True] ):
literal[string]
identifier[filter_xml] = literal[string] . identifier[join] ([
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[min_quality] ),
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( literal[int] * identifier[max_quality] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[max_quality] ),
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( literal[int] * identifier[max_quality] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[str] ( identifier[inclusive] ). identifier[lower] ()),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ])
identifier[util] . identifier[write_filter] ( identifier[script] , identifier[filter_xml] )
keyword[return] keyword[None] | def vert_quality(script, min_quality=0.0, max_quality=0.05, inclusive=True):
""" Select all the faces and vertexes within the specified vertex quality
range.
Args:
script: the FilterScript object or script filename to write
the filter] to.
min_quality (float): Minimum acceptable quality value.
max_quality (float): Maximum acceptable quality value.
inclusive (bool): If True only the faces with ALL the vertices within
the specified range are selected. Otherwise any face with at least
one vertex within the range is selected.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
filter_xml = ''.join([' <filter name="Select by Vertex Quality">\n', ' <Param name="minQ" ', 'value="{}" '.format(min_quality), 'description="Min Quality" ', 'min="0" ', 'max="{}" '.format(2 * max_quality), 'type="RichDynamicFloat" ', '/>\n', ' <Param name="maxQ" ', 'value="{}" '.format(max_quality), 'description="Max Quality" ', 'min="0" ', 'max="{}" '.format(2 * max_quality), 'type="RichDynamicFloat" ', '/>\n', ' <Param name="Inclusive" ', 'value="{}" '.format(str(inclusive).lower()), 'description="Inclusive Sel." ', 'type="RichBool" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
def _xpathDict(xml, xpath, cls, parent, **kwargs):
    """ Parse every node matched by an XPath query into a child resource.

    :param xml: An xml tree
    :type xml: etree
    :param xpath: XPath to find children
    :type xpath: str
    :param cls: Class identifying children
    :type cls: inventory.Resource
    :param parent: Parent of object
    :type parent: CtsCollection
    :rtype: list
    :returns: List of parsed children (despite the historical name, this
        returns a list rather than a dictionary)
    """
    # XPATH_NAMESPACES is the shared prefix->URI mapping used for XPath
    # queries (defined at module scope, outside this chunk).
    return [
        cls.parse(resource=node, parent=parent, **kwargs)
        for node in xml.xpath(xpath, namespaces=XPATH_NAMESPACES)
    ]
constant[ Returns a default Dict given certain information
:param xml: An xml tree
:type xml: etree
:param xpath: XPath to find children
:type xpath: str
:param cls: Class identifying children
:type cls: inventory.Resource
:param parent: Parent of object
:type parent: CtsCollection
:rtype: collections.defaultdict.<basestring, inventory.Resource>
:returns: Dictionary of children
]
variable[children] assign[=] list[[]]
for taget[name[child]] in starred[call[name[xml].xpath, parameter[name[xpath]]]] begin[:]
call[name[children].append, parameter[call[name[cls].parse, parameter[]]]]
return[name[children]] | keyword[def] identifier[_xpathDict] ( identifier[xml] , identifier[xpath] , identifier[cls] , identifier[parent] ,** identifier[kwargs] ):
literal[string]
identifier[children] =[]
keyword[for] identifier[child] keyword[in] identifier[xml] . identifier[xpath] ( identifier[xpath] , identifier[namespaces] = identifier[XPATH_NAMESPACES] ):
identifier[children] . identifier[append] ( identifier[cls] . identifier[parse] (
identifier[resource] = identifier[child] ,
identifier[parent] = identifier[parent] ,
** identifier[kwargs]
))
keyword[return] identifier[children] | def _xpathDict(xml, xpath, cls, parent, **kwargs):
""" Returns a default Dict given certain information
:param xml: An xml tree
:type xml: etree
:param xpath: XPath to find children
:type xpath: str
:param cls: Class identifying children
:type cls: inventory.Resource
:param parent: Parent of object
:type parent: CtsCollection
:rtype: collections.defaultdict.<basestring, inventory.Resource>
:returns: Dictionary of children
"""
children = []
for child in xml.xpath(xpath, namespaces=XPATH_NAMESPACES):
children.append(cls.parse(resource=child, parent=parent, **kwargs)) # depends on [control=['for'], data=['child']]
return children |
def create(dataset, features=None, distance=None, radius=1.,
           min_core_neighbors=10, verbose=True):
    """
    Create a DBSCAN clustering model. The DBSCAN method partitions the input
    dataset into three types of points, based on the estimated probability
    density at each point.

    - **Core** points have a large number of points within a given neighborhood.
      Specifically, `min_core_neighbors` must be within distance `radius` of a
      point for it to be considered a core point.

    - **Boundary** points are within distance `radius` of a core point, but
      don't have sufficient neighbors of their own to be considered core.

    - **Noise** points comprise the remainder of the data. These points have too
      few neighbors to be considered core points, and are further than distance
      `radius` from all core points.

    Clusters are formed by connecting core points that are neighbors of each
    other, then assigning boundary points to their nearest core neighbor's
    cluster.

    Parameters
    ----------
    dataset : SFrame
        Training data, with each row corresponding to an observation. Must
        include all features specified in the `features` parameter, but may have
        additional columns as well.

    features : list[str], optional
        Name of the columns with features to use in comparing records. 'None'
        (the default) indicates that all columns of the input `dataset` should
        be used to train the model. All features must be numeric, i.e. integer
        or float types.

    distance : str or list[list], optional
        Function to measure the distance between any two input data rows. This
        may be one of two types:

        - *String*: the name of a standard distance function. One of
          'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
          'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
          or 'transformed_dot_product'.

        - *Composite distance*: the weighted sum of several standard distance
          functions applied to various features. This is specified as a list of
          distance components, each of which is itself a list containing three
          items:

          1. list or tuple of feature names (str)
          2. standard distance name (str)
          3. scaling factor (int or float)

        For more information about Turi Create distance functions, please
        see the :py:mod:`~turicreate.toolkits.distances` module.

        For sparse vectors, missing keys are assumed to have value 0.0.

        If 'distance' is left unspecified, a composite distance is constructed
        automatically based on feature types.

    radius : int or float, optional
        Size of each point's neighborhood, with respect to the specified
        distance function.

    min_core_neighbors : int, optional
        Number of neighbors that must be within distance `radius` of a point in
        order for that point to be considered a "core point" of a cluster.

    verbose : bool, optional
        If True, print progress updates and model details during model creation.

    Returns
    -------
    out : DBSCANModel
        A model containing a cluster label for each row in the input `dataset`.
        Also contains the indices of the core points, cluster boundary points,
        and noise points.

    See Also
    --------
    DBSCANModel, turicreate.toolkits.distances

    Notes
    -----
    - Our implementation of DBSCAN first computes the similarity graph on the
      input dataset, which can be a computationally intensive process. In the
      current implementation, some distances are substantially faster than
      others; in particular "euclidean", "squared_euclidean", "cosine", and
      "transformed_dot_product" are quite fast, while composite distances can be
      slow.

    - Any distance function in the GL Create library may be used with DBSCAN but
      the results may be poor for distances that violate the standard metric
      properties, i.e. symmetry, non-negativity, triangle inequality, and
      identity of indiscernibles. In particular, the DBSCAN algorithm is based
      on the concept of connecting high-density points that are *close* to each
      other into a single cluster, but the notion of *close* may be very
      counterintuitive if the chosen distance function is not a valid metric.
      The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
      likely yield the best results.

    References
    ----------
    - Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
      Clusters in Large Spatial Databases with Noise
      <https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
      Second International Conference on Knowledge Discovery and Data Mining.
      pp. 226-231.

    - `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_

    - `Visualizing DBSCAN Clustering
      <http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_

    Examples
    --------
    >>> sf = turicreate.SFrame({
    ...     'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
    ...            8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
    ...     'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
    ...            5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
    ...
    >>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
    >>> model.cluster_id.print_rows(15)
    +--------+------------+----------+
    | row_id | cluster_id |   type   |
    +--------+------------+----------+
    |   8    |     0      |   core   |
    |   7    |     2      |   core   |
    |   0    |     1      |   core   |
    |   2    |     2      |   core   |
    |   3    |     1      |   core   |
    |   11   |     2      |   core   |
    |   4    |     2      |   core   |
    |   1    |     0      | boundary |
    |   6    |     0      | boundary |
    |   5    |     0      | boundary |
    |   9    |     0      | boundary |
    |   12   |     2      | boundary |
    |   10   |     1      | boundary |
    |   13   |     1      | boundary |
    +--------+------------+----------+
    [14 rows x 3 columns]
    """
    ## Start the training time clock and instantiate an empty model
    logger = _logging.getLogger(__name__)
    start_time = _time.time()
    ## Validate the input dataset
    _tkutl._raise_error_if_not_sframe(dataset, "dataset")
    _tkutl._raise_error_if_sframe_empty(dataset, "dataset")
    ## Validate neighborhood parameters
    # NOTE: a bool passes the isinstance(..., int) check here, since bool is a
    # subclass of int; True would be treated as min_core_neighbors=1.
    if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0:
        raise ValueError("Input 'min_core_neighbors' must be a non-negative " +
                         "integer.")
    if not isinstance(radius, (int, float)) or radius < 0:
        raise ValueError("Input 'radius' must be a non-negative integer " +
                         "or float.")
    ## Compute all-point nearest neighbors within `radius` and count
    #  neighborhood sizes
    knn_model = _tc.nearest_neighbors.create(dataset, features=features,
                                             distance=distance,
                                             method='brute_force',
                                             verbose=verbose)
    knn = knn_model.similarity_graph(k=None, radius=radius,
                                     include_self_edges=False,
                                     output_type='SFrame',
                                     verbose=verbose)
    # One row per point that has at least one neighbor within `radius`.
    neighbor_counts = knn.groupby('query_label', _agg.COUNT)
    ### NOTE: points with NO neighbors are already dropped here!
    ## Identify core points and boundary candidate points. Not all of the
    #  boundary candidates will be boundary points - some are in small isolated
    #  clusters.
    if verbose:
        logger.info("Identifying noise points and core points.")
    boundary_mask = neighbor_counts['Count'] < min_core_neighbors
    # Invert the 0/1 mask arithmetically: points with at least
    # min_core_neighbors neighbors are core.
    core_mask = 1 - boundary_mask
    # this includes too small clusters
    boundary_idx = neighbor_counts[boundary_mask]['query_label']
    core_idx = neighbor_counts[core_mask]['query_label']
    ## Build a similarity graph on the core points
    ## NOTE: careful with singleton core points - the second filter removes them
    #  from the edge set so they have to be added separately as vertices.
    if verbose:
        logger.info("Constructing the core point similarity graph.")
    core_vertices = knn.filter_by(core_idx, 'query_label')
    core_edges = core_vertices.filter_by(core_idx, 'reference_label')
    core_graph = _tc.SGraph()
    core_graph = core_graph.add_vertices(core_vertices[['query_label']],
                                         vid_field='query_label')
    core_graph = core_graph.add_edges(core_edges, src_field='query_label',
                                      dst_field='reference_label')
    ## Compute core point connected components and relabel to be consecutive
    #  integers
    cc = _tc.connected_components.create(core_graph, verbose=verbose)
    # Map each raw component_id to a 0-based consecutive '__label'.
    cc_labels = cc.component_size.add_row_number('__label')
    core_assignments = cc.component_id.join(cc_labels, on='component_id',
                                            how='left')[['__id', '__label']]
    core_assignments['type'] = 'core'
    ## Join potential boundary points to core cluster labels (points that aren't
    #  really on a boundary are implicitly dropped)
    if verbose:
        logger.info("Processing boundary points.")
    boundary_edges = knn.filter_by(boundary_idx, 'query_label')
    # separate real boundary points from points in small isolated clusters
    boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label')
    # join a boundary point to its single closest core point.
    boundary_assignments = boundary_core_edges.groupby('query_label',
        {'reference_label': _agg.ARGMIN('rank', 'reference_label')})
    boundary_assignments = boundary_assignments.join(core_assignments,
                                                     on={'reference_label': '__id'})
    # NOTE(review): rename/remove_column with inplace=True appear to mutate and
    # also return the SFrame; the re-assignment is kept for clarity — confirm
    # against the SFrame API before relying on the return value.
    boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True)
    boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True)
    boundary_assignments['type'] = 'boundary'
    ## Identify boundary candidates that turned out to be in small clusters but
    #  not on real cluster boundaries
    small_cluster_idx = set(boundary_idx).difference(
        boundary_assignments['__id'])
    ## Identify individual noise points by the fact that they have no neighbors.
    noise_idx = set(range(dataset.num_rows())).difference(
        neighbor_counts['query_label'])
    noise_idx = noise_idx.union(small_cluster_idx)
    noise_assignments = _tc.SFrame({'row_id': _tc.SArray(list(noise_idx), int)})
    # Noise points get a missing cluster_id; the column is cast to int,
    # presumably so its type matches the core/boundary labels when the three
    # SFrames are appended below.
    noise_assignments['cluster_id'] = None
    noise_assignments['cluster_id'] = noise_assignments['cluster_id'].astype(int)
    noise_assignments['type'] = 'noise'
    ## Append core, boundary, and noise results to each other.
    master_assignments = _tc.SFrame()
    num_clusters = 0
    if core_assignments.num_rows() > 0:
        core_assignments = core_assignments.rename({'__id': 'row_id',
                                                    '__label': 'cluster_id'}, inplace=True)
        master_assignments = master_assignments.append(core_assignments)
        num_clusters = len(core_assignments['cluster_id'].unique())
    if boundary_assignments.num_rows() > 0:
        boundary_assignments = boundary_assignments.rename({'__id': 'row_id',
                                                            '__label': 'cluster_id'}, inplace=True)
        master_assignments = master_assignments.append(boundary_assignments)
    if noise_assignments.num_rows() > 0:
        master_assignments = master_assignments.append(noise_assignments)
    ## Post-processing and formatting
    state = {'verbose': verbose,
             'radius': radius,
             'min_core_neighbors': min_core_neighbors,
             'distance': knn_model.distance,
             'num_distance_components': knn_model.num_distance_components,
             'num_examples': dataset.num_rows(),
             'features': knn_model.features,
             'num_features': knn_model.num_features,
             'unpacked_features': knn_model.unpacked_features,
             'num_unpacked_features': knn_model.num_unpacked_features,
             'cluster_id': master_assignments,
             'num_clusters': num_clusters,
             'training_time': _time.time() - start_time}
    return DBSCANModel(state)
constant[
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
]
variable[logger] assign[=] call[name[_logging].getLogger, parameter[name[__name__]]]
variable[start_time] assign[=] call[name[_time].time, parameter[]]
call[name[_tkutl]._raise_error_if_not_sframe, parameter[name[dataset], constant[dataset]]]
call[name[_tkutl]._raise_error_if_sframe_empty, parameter[name[dataset], constant[dataset]]]
if <ast.BoolOp object at 0x7da2049626b0> begin[:]
<ast.Raise object at 0x7da204960ac0>
if <ast.BoolOp object at 0x7da204963160> begin[:]
<ast.Raise object at 0x7da2049638e0>
variable[knn_model] assign[=] call[name[_tc].nearest_neighbors.create, parameter[name[dataset]]]
variable[knn] assign[=] call[name[knn_model].similarity_graph, parameter[]]
variable[neighbor_counts] assign[=] call[name[knn].groupby, parameter[constant[query_label], name[_agg].COUNT]]
if name[verbose] begin[:]
call[name[logger].info, parameter[constant[Identifying noise points and core points.]]]
variable[boundary_mask] assign[=] compare[call[name[neighbor_counts]][constant[Count]] less[<] name[min_core_neighbors]]
variable[core_mask] assign[=] binary_operation[constant[1] - name[boundary_mask]]
variable[boundary_idx] assign[=] call[call[name[neighbor_counts]][name[boundary_mask]]][constant[query_label]]
variable[core_idx] assign[=] call[call[name[neighbor_counts]][name[core_mask]]][constant[query_label]]
if name[verbose] begin[:]
call[name[logger].info, parameter[constant[Constructing the core point similarity graph.]]]
variable[core_vertices] assign[=] call[name[knn].filter_by, parameter[name[core_idx], constant[query_label]]]
variable[core_edges] assign[=] call[name[core_vertices].filter_by, parameter[name[core_idx], constant[reference_label]]]
variable[core_graph] assign[=] call[name[_tc].SGraph, parameter[]]
variable[core_graph] assign[=] call[name[core_graph].add_vertices, parameter[call[name[core_vertices]][list[[<ast.Constant object at 0x7da18bc70d90>]]]]]
variable[core_graph] assign[=] call[name[core_graph].add_edges, parameter[name[core_edges]]]
variable[cc] assign[=] call[name[_tc].connected_components.create, parameter[name[core_graph]]]
variable[cc_labels] assign[=] call[name[cc].component_size.add_row_number, parameter[constant[__label]]]
variable[core_assignments] assign[=] call[call[name[cc].component_id.join, parameter[name[cc_labels]]]][list[[<ast.Constant object at 0x7da18bc72c80>, <ast.Constant object at 0x7da18bc712a0>]]]
call[name[core_assignments]][constant[type]] assign[=] constant[core]
if name[verbose] begin[:]
call[name[logger].info, parameter[constant[Processing boundary points.]]]
variable[boundary_edges] assign[=] call[name[knn].filter_by, parameter[name[boundary_idx], constant[query_label]]]
variable[boundary_core_edges] assign[=] call[name[boundary_edges].filter_by, parameter[name[core_idx], constant[reference_label]]]
variable[boundary_assignments] assign[=] call[name[boundary_core_edges].groupby, parameter[constant[query_label], dictionary[[<ast.Constant object at 0x7da1b1f765c0>], [<ast.Call object at 0x7da1b1f764a0>]]]]
variable[boundary_assignments] assign[=] call[name[boundary_assignments].join, parameter[name[core_assignments]]]
variable[boundary_assignments] assign[=] call[name[boundary_assignments].rename, parameter[dictionary[[<ast.Constant object at 0x7da1b1f77fa0>], [<ast.Constant object at 0x7da1b1f77910>]]]]
variable[boundary_assignments] assign[=] call[name[boundary_assignments].remove_column, parameter[constant[reference_label]]]
call[name[boundary_assignments]][constant[type]] assign[=] constant[boundary]
variable[small_cluster_idx] assign[=] call[call[name[set], parameter[name[boundary_idx]]].difference, parameter[call[name[boundary_assignments]][constant[__id]]]]
variable[noise_idx] assign[=] call[call[name[set], parameter[call[name[range], parameter[call[name[dataset].num_rows, parameter[]]]]]].difference, parameter[call[name[neighbor_counts]][constant[query_label]]]]
variable[noise_idx] assign[=] call[name[noise_idx].union, parameter[name[small_cluster_idx]]]
variable[noise_assignments] assign[=] call[name[_tc].SFrame, parameter[dictionary[[<ast.Constant object at 0x7da1b1f74850>], [<ast.Call object at 0x7da1b1f74610>]]]]
call[name[noise_assignments]][constant[cluster_id]] assign[=] constant[None]
call[name[noise_assignments]][constant[cluster_id]] assign[=] call[call[name[noise_assignments]][constant[cluster_id]].astype, parameter[name[int]]]
call[name[noise_assignments]][constant[type]] assign[=] constant[noise]
variable[master_assignments] assign[=] call[name[_tc].SFrame, parameter[]]
variable[num_clusters] assign[=] constant[0]
if compare[call[name[core_assignments].num_rows, parameter[]] greater[>] constant[0]] begin[:]
variable[core_assignments] assign[=] call[name[core_assignments].rename, parameter[dictionary[[<ast.Constant object at 0x7da1b1f753f0>, <ast.Constant object at 0x7da1b1f77b50>], [<ast.Constant object at 0x7da1b1f74310>, <ast.Constant object at 0x7da1b1f746a0>]]]]
variable[master_assignments] assign[=] call[name[master_assignments].append, parameter[name[core_assignments]]]
variable[num_clusters] assign[=] call[name[len], parameter[call[call[name[core_assignments]][constant[cluster_id]].unique, parameter[]]]]
if compare[call[name[boundary_assignments].num_rows, parameter[]] greater[>] constant[0]] begin[:]
variable[boundary_assignments] assign[=] call[name[boundary_assignments].rename, parameter[dictionary[[<ast.Constant object at 0x7da1b1f74550>, <ast.Constant object at 0x7da1b1f76020>], [<ast.Constant object at 0x7da1b1f77f10>, <ast.Constant object at 0x7da1b1f76710>]]]]
variable[master_assignments] assign[=] call[name[master_assignments].append, parameter[name[boundary_assignments]]]
if compare[call[name[noise_assignments].num_rows, parameter[]] greater[>] constant[0]] begin[:]
variable[master_assignments] assign[=] call[name[master_assignments].append, parameter[name[noise_assignments]]]
variable[state] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f77a60>, <ast.Constant object at 0x7da1b1f74490>, <ast.Constant object at 0x7da1b1f75ea0>, <ast.Constant object at 0x7da1b1f75d80>, <ast.Constant object at 0x7da1b1f77880>, <ast.Constant object at 0x7da1b1f75a20>, <ast.Constant object at 0x7da1b1f75a80>, <ast.Constant object at 0x7da1b1f77280>, <ast.Constant object at 0x7da1b1f745e0>, <ast.Constant object at 0x7da1b1f77b80>, <ast.Constant object at 0x7da1b1f75900>, <ast.Constant object at 0x7da1b1f770a0>, <ast.Constant object at 0x7da1b1f76590>], [<ast.Name object at 0x7da1b1f774c0>, <ast.Name object at 0x7da1b1f77640>, <ast.Name object at 0x7da1b1f75b70>, <ast.Attribute object at 0x7da1b1f74700>, <ast.Attribute object at 0x7da1b1f74f10>, <ast.Call object at 0x7da1b1f75e40>, <ast.Attribute object at 0x7da1b1f767d0>, <ast.Attribute object at 0x7da1b1f77070>, <ast.Attribute object at 0x7da1b1f75f60>, <ast.Attribute object at 0x7da1b1f76350>, <ast.Name object at 0x7da1b1f76a10>, <ast.Name object at 0x7da1b1f761d0>, <ast.BinOp object at 0x7da1b1f77c40>]]
return[call[name[DBSCANModel], parameter[name[state]]]] | keyword[def] identifier[create] ( identifier[dataset] , identifier[features] = keyword[None] , identifier[distance] = keyword[None] , identifier[radius] = literal[int] ,
identifier[min_core_neighbors] = literal[int] , identifier[verbose] = keyword[True] ):
literal[string]
identifier[logger] = identifier[_logging] . identifier[getLogger] ( identifier[__name__] )
identifier[start_time] = identifier[_time] . identifier[time] ()
identifier[_tkutl] . identifier[_raise_error_if_not_sframe] ( identifier[dataset] , literal[string] )
identifier[_tkutl] . identifier[_raise_error_if_sframe_empty] ( identifier[dataset] , literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[min_core_neighbors] , identifier[int] ) keyword[or] identifier[min_core_neighbors] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[radius] ,( identifier[int] , identifier[float] )) keyword[or] identifier[radius] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] )
identifier[knn_model] = identifier[_tc] . identifier[nearest_neighbors] . identifier[create] ( identifier[dataset] , identifier[features] = identifier[features] ,
identifier[distance] = identifier[distance] ,
identifier[method] = literal[string] ,
identifier[verbose] = identifier[verbose] )
identifier[knn] = identifier[knn_model] . identifier[similarity_graph] ( identifier[k] = keyword[None] , identifier[radius] = identifier[radius] ,
identifier[include_self_edges] = keyword[False] ,
identifier[output_type] = literal[string] ,
identifier[verbose] = identifier[verbose] )
identifier[neighbor_counts] = identifier[knn] . identifier[groupby] ( literal[string] , identifier[_agg] . identifier[COUNT] )
keyword[if] identifier[verbose] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[boundary_mask] = identifier[neighbor_counts] [ literal[string] ]< identifier[min_core_neighbors]
identifier[core_mask] = literal[int] - identifier[boundary_mask]
identifier[boundary_idx] = identifier[neighbor_counts] [ identifier[boundary_mask] ][ literal[string] ]
identifier[core_idx] = identifier[neighbor_counts] [ identifier[core_mask] ][ literal[string] ]
keyword[if] identifier[verbose] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[core_vertices] = identifier[knn] . identifier[filter_by] ( identifier[core_idx] , literal[string] )
identifier[core_edges] = identifier[core_vertices] . identifier[filter_by] ( identifier[core_idx] , literal[string] )
identifier[core_graph] = identifier[_tc] . identifier[SGraph] ()
identifier[core_graph] = identifier[core_graph] . identifier[add_vertices] ( identifier[core_vertices] [[ literal[string] ]],
identifier[vid_field] = literal[string] )
identifier[core_graph] = identifier[core_graph] . identifier[add_edges] ( identifier[core_edges] , identifier[src_field] = literal[string] ,
identifier[dst_field] = literal[string] )
identifier[cc] = identifier[_tc] . identifier[connected_components] . identifier[create] ( identifier[core_graph] , identifier[verbose] = identifier[verbose] )
identifier[cc_labels] = identifier[cc] . identifier[component_size] . identifier[add_row_number] ( literal[string] )
identifier[core_assignments] = identifier[cc] . identifier[component_id] . identifier[join] ( identifier[cc_labels] , identifier[on] = literal[string] ,
identifier[how] = literal[string] )[[ literal[string] , literal[string] ]]
identifier[core_assignments] [ literal[string] ]= literal[string]
keyword[if] identifier[verbose] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[boundary_edges] = identifier[knn] . identifier[filter_by] ( identifier[boundary_idx] , literal[string] )
identifier[boundary_core_edges] = identifier[boundary_edges] . identifier[filter_by] ( identifier[core_idx] , literal[string] )
identifier[boundary_assignments] = identifier[boundary_core_edges] . identifier[groupby] ( literal[string] ,
{ literal[string] : identifier[_agg] . identifier[ARGMIN] ( literal[string] , literal[string] )})
identifier[boundary_assignments] = identifier[boundary_assignments] . identifier[join] ( identifier[core_assignments] ,
identifier[on] ={ literal[string] : literal[string] })
identifier[boundary_assignments] = identifier[boundary_assignments] . identifier[rename] ({ literal[string] : literal[string] }, identifier[inplace] = keyword[True] )
identifier[boundary_assignments] = identifier[boundary_assignments] . identifier[remove_column] ( literal[string] , identifier[inplace] = keyword[True] )
identifier[boundary_assignments] [ literal[string] ]= literal[string]
identifier[small_cluster_idx] = identifier[set] ( identifier[boundary_idx] ). identifier[difference] (
identifier[boundary_assignments] [ literal[string] ])
identifier[noise_idx] = identifier[set] ( identifier[range] ( identifier[dataset] . identifier[num_rows] ())). identifier[difference] (
identifier[neighbor_counts] [ literal[string] ])
identifier[noise_idx] = identifier[noise_idx] . identifier[union] ( identifier[small_cluster_idx] )
identifier[noise_assignments] = identifier[_tc] . identifier[SFrame] ({ literal[string] : identifier[_tc] . identifier[SArray] ( identifier[list] ( identifier[noise_idx] ), identifier[int] )})
identifier[noise_assignments] [ literal[string] ]= keyword[None]
identifier[noise_assignments] [ literal[string] ]= identifier[noise_assignments] [ literal[string] ]. identifier[astype] ( identifier[int] )
identifier[noise_assignments] [ literal[string] ]= literal[string]
identifier[master_assignments] = identifier[_tc] . identifier[SFrame] ()
identifier[num_clusters] = literal[int]
keyword[if] identifier[core_assignments] . identifier[num_rows] ()> literal[int] :
identifier[core_assignments] = identifier[core_assignments] . identifier[rename] ({ literal[string] : literal[string] ,
literal[string] : literal[string] }, identifier[inplace] = keyword[True] )
identifier[master_assignments] = identifier[master_assignments] . identifier[append] ( identifier[core_assignments] )
identifier[num_clusters] = identifier[len] ( identifier[core_assignments] [ literal[string] ]. identifier[unique] ())
keyword[if] identifier[boundary_assignments] . identifier[num_rows] ()> literal[int] :
identifier[boundary_assignments] = identifier[boundary_assignments] . identifier[rename] ({ literal[string] : literal[string] ,
literal[string] : literal[string] }, identifier[inplace] = keyword[True] )
identifier[master_assignments] = identifier[master_assignments] . identifier[append] ( identifier[boundary_assignments] )
keyword[if] identifier[noise_assignments] . identifier[num_rows] ()> literal[int] :
identifier[master_assignments] = identifier[master_assignments] . identifier[append] ( identifier[noise_assignments] )
identifier[state] ={ literal[string] : identifier[verbose] ,
literal[string] : identifier[radius] ,
literal[string] : identifier[min_core_neighbors] ,
literal[string] : identifier[knn_model] . identifier[distance] ,
literal[string] : identifier[knn_model] . identifier[num_distance_components] ,
literal[string] : identifier[dataset] . identifier[num_rows] (),
literal[string] : identifier[knn_model] . identifier[features] ,
literal[string] : identifier[knn_model] . identifier[num_features] ,
literal[string] : identifier[knn_model] . identifier[unpacked_features] ,
literal[string] : identifier[knn_model] . identifier[num_unpacked_features] ,
literal[string] : identifier[master_assignments] ,
literal[string] : identifier[num_clusters] ,
literal[string] : identifier[_time] . identifier[time] ()- identifier[start_time] }
keyword[return] identifier[DBSCANModel] ( identifier[state] ) | def create(dataset, features=None, distance=None, radius=1.0, min_core_neighbors=10, verbose=True):
"""
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the GL Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- Ester, M., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
"""
## Start the training time clock and instantiate an empty model
logger = _logging.getLogger(__name__)
start_time = _time.time()
## Validate the input dataset
_tkutl._raise_error_if_not_sframe(dataset, 'dataset')
_tkutl._raise_error_if_sframe_empty(dataset, 'dataset')
## Validate neighborhood parameters
if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0:
raise ValueError("Input 'min_core_neighbors' must be a non-negative " + 'integer.') # depends on [control=['if'], data=[]]
if not isinstance(radius, (int, float)) or radius < 0:
raise ValueError("Input 'radius' must be a non-negative integer " + 'or float.') # depends on [control=['if'], data=[]]
## Compute all-point nearest neighbors within `radius` and count
# neighborhood sizes
knn_model = _tc.nearest_neighbors.create(dataset, features=features, distance=distance, method='brute_force', verbose=verbose)
knn = knn_model.similarity_graph(k=None, radius=radius, include_self_edges=False, output_type='SFrame', verbose=verbose)
neighbor_counts = knn.groupby('query_label', _agg.COUNT)
### NOTE: points with NO neighbors are already dropped here!
## Identify core points and boundary candidate points. Not all of the
# boundary candidates will be boundary points - some are in small isolated
# clusters.
if verbose:
logger.info('Identifying noise points and core points.') # depends on [control=['if'], data=[]]
boundary_mask = neighbor_counts['Count'] < min_core_neighbors
core_mask = 1 - boundary_mask
# this includes too small clusters
boundary_idx = neighbor_counts[boundary_mask]['query_label']
core_idx = neighbor_counts[core_mask]['query_label']
## Build a similarity graph on the core points
## NOTE: careful with singleton core points - the second filter removes them
# from the edge set so they have to be added separately as vertices.
if verbose:
logger.info('Constructing the core point similarity graph.') # depends on [control=['if'], data=[]]
core_vertices = knn.filter_by(core_idx, 'query_label')
core_edges = core_vertices.filter_by(core_idx, 'reference_label')
core_graph = _tc.SGraph()
core_graph = core_graph.add_vertices(core_vertices[['query_label']], vid_field='query_label')
core_graph = core_graph.add_edges(core_edges, src_field='query_label', dst_field='reference_label')
## Compute core point connected components and relabel to be consecutive
# integers
cc = _tc.connected_components.create(core_graph, verbose=verbose)
cc_labels = cc.component_size.add_row_number('__label')
core_assignments = cc.component_id.join(cc_labels, on='component_id', how='left')[['__id', '__label']]
core_assignments['type'] = 'core'
## Join potential boundary points to core cluster labels (points that aren't
# really on a boundary are implicitly dropped)
if verbose:
logger.info('Processing boundary points.') # depends on [control=['if'], data=[]]
boundary_edges = knn.filter_by(boundary_idx, 'query_label')
# separate real boundary points from points in small isolated clusters
boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label')
# join a boundary point to its single closest core point.
boundary_assignments = boundary_core_edges.groupby('query_label', {'reference_label': _agg.ARGMIN('rank', 'reference_label')})
boundary_assignments = boundary_assignments.join(core_assignments, on={'reference_label': '__id'})
boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True)
boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True)
boundary_assignments['type'] = 'boundary'
## Identify boundary candidates that turned out to be in small clusters but
# not on real cluster boundaries
small_cluster_idx = set(boundary_idx).difference(boundary_assignments['__id'])
## Identify individual noise points by the fact that they have no neighbors.
noise_idx = set(range(dataset.num_rows())).difference(neighbor_counts['query_label'])
noise_idx = noise_idx.union(small_cluster_idx)
noise_assignments = _tc.SFrame({'row_id': _tc.SArray(list(noise_idx), int)})
noise_assignments['cluster_id'] = None
noise_assignments['cluster_id'] = noise_assignments['cluster_id'].astype(int)
noise_assignments['type'] = 'noise'
## Append core, boundary, and noise results to each other.
master_assignments = _tc.SFrame()
num_clusters = 0
if core_assignments.num_rows() > 0:
core_assignments = core_assignments.rename({'__id': 'row_id', '__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(core_assignments)
num_clusters = len(core_assignments['cluster_id'].unique()) # depends on [control=['if'], data=[]]
if boundary_assignments.num_rows() > 0:
boundary_assignments = boundary_assignments.rename({'__id': 'row_id', '__label': 'cluster_id'}, inplace=True)
master_assignments = master_assignments.append(boundary_assignments) # depends on [control=['if'], data=[]]
if noise_assignments.num_rows() > 0:
master_assignments = master_assignments.append(noise_assignments) # depends on [control=['if'], data=[]]
## Post-processing and formatting
state = {'verbose': verbose, 'radius': radius, 'min_core_neighbors': min_core_neighbors, 'distance': knn_model.distance, 'num_distance_components': knn_model.num_distance_components, 'num_examples': dataset.num_rows(), 'features': knn_model.features, 'num_features': knn_model.num_features, 'unpacked_features': knn_model.unpacked_features, 'num_unpacked_features': knn_model.num_unpacked_features, 'cluster_id': master_assignments, 'num_clusters': num_clusters, 'training_time': _time.time() - start_time}
return DBSCANModel(state) |
def hasMZSignature(self, rd):
"""
Check for MZ signature.
@type rd: L{ReadData}
@param rd: A L{ReadData} object.
@rtype: bool
@return: True is the given L{ReadData} stream has the MZ signature. Otherwise, False.
"""
rd.setOffset(0)
sign = rd.read(2)
if sign == "MZ":
return True
return False | def function[hasMZSignature, parameter[self, rd]]:
constant[
Check for MZ signature.
@type rd: L{ReadData}
@param rd: A L{ReadData} object.
@rtype: bool
@return: True is the given L{ReadData} stream has the MZ signature. Otherwise, False.
]
call[name[rd].setOffset, parameter[constant[0]]]
variable[sign] assign[=] call[name[rd].read, parameter[constant[2]]]
if compare[name[sign] equal[==] constant[MZ]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[hasMZSignature] ( identifier[self] , identifier[rd] ):
literal[string]
identifier[rd] . identifier[setOffset] ( literal[int] )
identifier[sign] = identifier[rd] . identifier[read] ( literal[int] )
keyword[if] identifier[sign] == literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def hasMZSignature(self, rd):
"""
Check for MZ signature.
@type rd: L{ReadData}
@param rd: A L{ReadData} object.
@rtype: bool
@return: True is the given L{ReadData} stream has the MZ signature. Otherwise, False.
"""
rd.setOffset(0)
sign = rd.read(2)
if sign == 'MZ':
return True # depends on [control=['if'], data=[]]
return False |
def addRelationship(self, pid, subject, predicate, object, isLiteral=False,
datatype=None):
"""
Wrapper function for `Fedora REST API addRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-addRelationship>`_
:param pid: persistent id for the object to add the new relationship to
:param subject: subject of the relationship; object or datastream URI
:param predicate: predicate of the new relationship
:param object: object of the relationship
:param isLiteral: true if object is literal, false if it is a URI;
Fedora has no default; this method defaults to False
:param datatype: optional datatype for literal objects
:returns: boolean success
"""
http_args = {'subject': subject, 'predicate': predicate,
'object': object, 'isLiteral': isLiteral}
if datatype is not None:
http_args['datatype'] = datatype
url = 'objects/%(pid)s/relationships/new' % {'pid': pid}
response = self.post(url, params=http_args)
return response.status_code == requests.codes.ok | def function[addRelationship, parameter[self, pid, subject, predicate, object, isLiteral, datatype]]:
constant[
Wrapper function for `Fedora REST API addRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-addRelationship>`_
:param pid: persistent id for the object to add the new relationship to
:param subject: subject of the relationship; object or datastream URI
:param predicate: predicate of the new relationship
:param object: object of the relationship
:param isLiteral: true if object is literal, false if it is a URI;
Fedora has no default; this method defaults to False
:param datatype: optional datatype for literal objects
:returns: boolean success
]
variable[http_args] assign[=] dictionary[[<ast.Constant object at 0x7da1b26a70a0>, <ast.Constant object at 0x7da1b26a5c00>, <ast.Constant object at 0x7da1b26a5f60>, <ast.Constant object at 0x7da1b26a6140>], [<ast.Name object at 0x7da1b26a5e70>, <ast.Name object at 0x7da1b26a7520>, <ast.Name object at 0x7da1b26a7010>, <ast.Name object at 0x7da1b26a6c20>]]
if compare[name[datatype] is_not constant[None]] begin[:]
call[name[http_args]][constant[datatype]] assign[=] name[datatype]
variable[url] assign[=] binary_operation[constant[objects/%(pid)s/relationships/new] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b26a7250>], [<ast.Name object at 0x7da1b26a7700>]]]
variable[response] assign[=] call[name[self].post, parameter[name[url]]]
return[compare[name[response].status_code equal[==] name[requests].codes.ok]] | keyword[def] identifier[addRelationship] ( identifier[self] , identifier[pid] , identifier[subject] , identifier[predicate] , identifier[object] , identifier[isLiteral] = keyword[False] ,
identifier[datatype] = keyword[None] ):
literal[string]
identifier[http_args] ={ literal[string] : identifier[subject] , literal[string] : identifier[predicate] ,
literal[string] : identifier[object] , literal[string] : identifier[isLiteral] }
keyword[if] identifier[datatype] keyword[is] keyword[not] keyword[None] :
identifier[http_args] [ literal[string] ]= identifier[datatype]
identifier[url] = literal[string] %{ literal[string] : identifier[pid] }
identifier[response] = identifier[self] . identifier[post] ( identifier[url] , identifier[params] = identifier[http_args] )
keyword[return] identifier[response] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[ok] | def addRelationship(self, pid, subject, predicate, object, isLiteral=False, datatype=None):
"""
Wrapper function for `Fedora REST API addRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-addRelationship>`_
:param pid: persistent id for the object to add the new relationship to
:param subject: subject of the relationship; object or datastream URI
:param predicate: predicate of the new relationship
:param object: object of the relationship
:param isLiteral: true if object is literal, false if it is a URI;
Fedora has no default; this method defaults to False
:param datatype: optional datatype for literal objects
:returns: boolean success
"""
http_args = {'subject': subject, 'predicate': predicate, 'object': object, 'isLiteral': isLiteral}
if datatype is not None:
http_args['datatype'] = datatype # depends on [control=['if'], data=['datatype']]
url = 'objects/%(pid)s/relationships/new' % {'pid': pid}
response = self.post(url, params=http_args)
return response.status_code == requests.codes.ok |
def add_template_for_node(name, node_id):
"Set the template to use to display the node"
with current_app.app_context():
db.execute(text(fetch_query_string('insert_template.sql')),
name=name, node_id=node_id)
result = db.execute(text(fetch_query_string('select_template.sql')),
name=name, node_id=node_id).fetchall()
if result:
template_id = result[0]['id']
db.execute(text(fetch_query_string('update_template_node.sql')),
template=template_id, node_id=node_id) | def function[add_template_for_node, parameter[name, node_id]]:
constant[Set the template to use to display the node]
with call[name[current_app].app_context, parameter[]] begin[:]
call[name[db].execute, parameter[call[name[text], parameter[call[name[fetch_query_string], parameter[constant[insert_template.sql]]]]]]]
variable[result] assign[=] call[call[name[db].execute, parameter[call[name[text], parameter[call[name[fetch_query_string], parameter[constant[select_template.sql]]]]]]].fetchall, parameter[]]
if name[result] begin[:]
variable[template_id] assign[=] call[call[name[result]][constant[0]]][constant[id]]
call[name[db].execute, parameter[call[name[text], parameter[call[name[fetch_query_string], parameter[constant[update_template_node.sql]]]]]]] | keyword[def] identifier[add_template_for_node] ( identifier[name] , identifier[node_id] ):
literal[string]
keyword[with] identifier[current_app] . identifier[app_context] ():
identifier[db] . identifier[execute] ( identifier[text] ( identifier[fetch_query_string] ( literal[string] )),
identifier[name] = identifier[name] , identifier[node_id] = identifier[node_id] )
identifier[result] = identifier[db] . identifier[execute] ( identifier[text] ( identifier[fetch_query_string] ( literal[string] )),
identifier[name] = identifier[name] , identifier[node_id] = identifier[node_id] ). identifier[fetchall] ()
keyword[if] identifier[result] :
identifier[template_id] = identifier[result] [ literal[int] ][ literal[string] ]
identifier[db] . identifier[execute] ( identifier[text] ( identifier[fetch_query_string] ( literal[string] )),
identifier[template] = identifier[template_id] , identifier[node_id] = identifier[node_id] ) | def add_template_for_node(name, node_id):
"""Set the template to use to display the node"""
with current_app.app_context():
db.execute(text(fetch_query_string('insert_template.sql')), name=name, node_id=node_id)
result = db.execute(text(fetch_query_string('select_template.sql')), name=name, node_id=node_id).fetchall()
if result:
template_id = result[0]['id']
db.execute(text(fetch_query_string('update_template_node.sql')), template=template_id, node_id=node_id) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] |
def prepare_read(data, method='readlines', mode='r'):
    """Normalise several input flavours into parse-ready data.

    Args:
        data (iter): File-like object, list of lines, or a path name
        method (str): Reader method to invoke ('readlines' or 'read')
        mode (str): File mode used when *data* is a path
    Returns:
        list: Data suitable for parsing (a string when ``method='read'``)
    Raises:
        TypeError: Invalid value for data
    """
    if hasattr(data, 'readlines'):
        # Already file-like: invoke the requested reader on the object.
        reader = getattr(data, method)
        return reader()
    if isinstance(data, list):
        # A list of lines only needs flattening for whole-text reads;
        # otherwise it is already in the right shape.
        return ''.join(data) if method == 'read' else data
    if isinstance(data, basestring):
        # A bare string is treated as a filesystem path to open and read.
        return getattr(open(data, mode), method)()
    raise TypeError('Unable to handle data of type %r' % type(data))
constant[Prepare various input types for parsing.
Args:
data (iter): Data to read
method (str): Method to process data with
mode (str): Custom mode to process with, if data is a file
Returns:
list: List suitable for parsing
Raises:
TypeError: Invalid value for data
]
if call[name[hasattr], parameter[name[data], constant[readlines]]] begin[:]
variable[data] assign[=] call[call[name[getattr], parameter[name[data], name[method]]], parameter[]]
return[name[data]] | keyword[def] identifier[prepare_read] ( identifier[data] , identifier[method] = literal[string] , identifier[mode] = literal[string] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[data] = identifier[getattr] ( identifier[data] , identifier[method] )()
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ):
keyword[if] identifier[method] == literal[string] :
keyword[return] literal[string] . identifier[join] ( identifier[data] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[basestring] ):
identifier[data] = identifier[getattr] ( identifier[open] ( identifier[data] , identifier[mode] ), identifier[method] )()
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[type] ( identifier[data] ))
keyword[return] identifier[data] | def prepare_read(data, method='readlines', mode='r'):
"""Prepare various input types for parsing.
Args:
data (iter): Data to read
method (str): Method to process data with
mode (str): Custom mode to process with, if data is a file
Returns:
list: List suitable for parsing
Raises:
TypeError: Invalid value for data
"""
if hasattr(data, 'readlines'):
data = getattr(data, method)() # depends on [control=['if'], data=[]]
elif isinstance(data, list):
if method == 'read':
return ''.join(data) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(data, basestring):
data = getattr(open(data, mode), method)() # depends on [control=['if'], data=[]]
else:
raise TypeError('Unable to handle data of type %r' % type(data))
return data |
def __vCmdConnectCameras(self, args):
    '''ToDo: Validate the argument as a valid port'''
    # The first positional argument, when supplied, selects the port;
    # otherwise the previously configured WirelessPort is reused.
    if args:
        self.WirelessPort = args[0]
    print("Connecting to Cameras on %s" % self.WirelessPort)
    self.__vRegisterCameras()
constant[ToDo: Validate the argument as a valid port]
if compare[call[name[len], parameter[name[args]]] greater_or_equal[>=] constant[1]] begin[:]
name[self].WirelessPort assign[=] call[name[args]][constant[0]]
call[name[print], parameter[binary_operation[constant[Connecting to Cameras on %s] <ast.Mod object at 0x7da2590d6920> name[self].WirelessPort]]]
call[name[self].__vRegisterCameras, parameter[]] | keyword[def] identifier[__vCmdConnectCameras] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )>= literal[int] :
identifier[self] . identifier[WirelessPort] = identifier[args] [ literal[int] ]
identifier[print] ( literal[string] % identifier[self] . identifier[WirelessPort] )
identifier[self] . identifier[__vRegisterCameras] () | def __vCmdConnectCameras(self, args):
"""ToDo: Validate the argument as a valid port"""
if len(args) >= 1:
self.WirelessPort = args[0] # depends on [control=['if'], data=[]]
print('Connecting to Cameras on %s' % self.WirelessPort)
self.__vRegisterCameras() |
def cursor_position_changed(self):
    """Brace matching: re-highlight the brace pair around the new cursor position."""
    # Cancel any highlight left over from the previous cursor position.
    if self.bracepos is not None:
        self.__highlight(self.bracepos, cancel=True)
        self.bracepos = None
    cursor = self.textCursor()
    if cursor.position() == 0:
        # At the very start of the document there is no character to the left.
        return
    # Select the single character immediately before the cursor.
    cursor.movePosition(QTextCursor.PreviousCharacter,
                        QTextCursor.KeepAnchor)
    text = to_text_string(cursor.selectedText())
    pos1 = cursor.position()
    if text in (')', ']', '}'):
        # Closing brace: search backwards for its opening partner.
        pos2 = self.find_brace_match(pos1, text, forward=False)
    elif text in ('(', '[', '{'):
        # Opening brace: search forwards for its closing partner.
        pos2 = self.find_brace_match(pos1, text, forward=True)
    else:
        # Not a brace character -- nothing to highlight.
        return
    if pos2 is not None:
        # Partner found: highlight the pair with the "matched" color.
        self.bracepos = (pos1, pos2)
        self.__highlight(self.bracepos, color=self.matched_p_color)
    else:
        # No partner found: flag the lone brace with the "unmatched" color.
        self.bracepos = (pos1,)
        self.__highlight(self.bracepos, color=self.unmatched_p_color)
constant[Brace matching]
if compare[name[self].bracepos is_not constant[None]] begin[:]
call[name[self].__highlight, parameter[name[self].bracepos]]
name[self].bracepos assign[=] constant[None]
variable[cursor] assign[=] call[name[self].textCursor, parameter[]]
if compare[call[name[cursor].position, parameter[]] equal[==] constant[0]] begin[:]
return[None]
call[name[cursor].movePosition, parameter[name[QTextCursor].PreviousCharacter, name[QTextCursor].KeepAnchor]]
variable[text] assign[=] call[name[to_text_string], parameter[call[name[cursor].selectedText, parameter[]]]]
variable[pos1] assign[=] call[name[cursor].position, parameter[]]
if compare[name[text] in tuple[[<ast.Constant object at 0x7da18bccbf40>, <ast.Constant object at 0x7da18bcc9e40>, <ast.Constant object at 0x7da18bcc84f0>]]] begin[:]
variable[pos2] assign[=] call[name[self].find_brace_match, parameter[name[pos1], name[text]]]
if compare[name[pos2] is_not constant[None]] begin[:]
name[self].bracepos assign[=] tuple[[<ast.Name object at 0x7da18bccae00>, <ast.Name object at 0x7da18bcca1a0>]]
call[name[self].__highlight, parameter[name[self].bracepos]] | keyword[def] identifier[cursor_position_changed] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[bracepos] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[__highlight] ( identifier[self] . identifier[bracepos] , identifier[cancel] = keyword[True] )
identifier[self] . identifier[bracepos] = keyword[None]
identifier[cursor] = identifier[self] . identifier[textCursor] ()
keyword[if] identifier[cursor] . identifier[position] ()== literal[int] :
keyword[return]
identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[PreviousCharacter] ,
identifier[QTextCursor] . identifier[KeepAnchor] )
identifier[text] = identifier[to_text_string] ( identifier[cursor] . identifier[selectedText] ())
identifier[pos1] = identifier[cursor] . identifier[position] ()
keyword[if] identifier[text] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[pos2] = identifier[self] . identifier[find_brace_match] ( identifier[pos1] , identifier[text] , identifier[forward] = keyword[False] )
keyword[elif] identifier[text] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[pos2] = identifier[self] . identifier[find_brace_match] ( identifier[pos1] , identifier[text] , identifier[forward] = keyword[True] )
keyword[else] :
keyword[return]
keyword[if] identifier[pos2] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[bracepos] =( identifier[pos1] , identifier[pos2] )
identifier[self] . identifier[__highlight] ( identifier[self] . identifier[bracepos] , identifier[color] = identifier[self] . identifier[matched_p_color] )
keyword[else] :
identifier[self] . identifier[bracepos] =( identifier[pos1] ,)
identifier[self] . identifier[__highlight] ( identifier[self] . identifier[bracepos] , identifier[color] = identifier[self] . identifier[unmatched_p_color] ) | def cursor_position_changed(self):
"""Brace matching"""
if self.bracepos is not None:
self.__highlight(self.bracepos, cancel=True)
self.bracepos = None # depends on [control=['if'], data=[]]
cursor = self.textCursor()
if cursor.position() == 0:
return # depends on [control=['if'], data=[]]
cursor.movePosition(QTextCursor.PreviousCharacter, QTextCursor.KeepAnchor)
text = to_text_string(cursor.selectedText())
pos1 = cursor.position()
if text in (')', ']', '}'):
pos2 = self.find_brace_match(pos1, text, forward=False) # depends on [control=['if'], data=['text']]
elif text in ('(', '[', '{'):
pos2 = self.find_brace_match(pos1, text, forward=True) # depends on [control=['if'], data=['text']]
else:
return
if pos2 is not None:
self.bracepos = (pos1, pos2)
self.__highlight(self.bracepos, color=self.matched_p_color) # depends on [control=['if'], data=['pos2']]
else:
self.bracepos = (pos1,)
self.__highlight(self.bracepos, color=self.unmatched_p_color) |
def _make_it_list(dict_, field_name, value):
    '''
    Return the field's value list, merged with anything already stored
    under ``field_name`` in ``dict_`` (deduplicated via a set).
    '''
    # Start from whatever was already collected for this field.
    previous = dict_.get(field_name, [])
    if value is None:
        return previous
    port_field = field_name in ('source_port', 'destination_port')
    if isinstance(value, (tuple, list)):
        if port_field:
            # Port fields may mix bare ports with (start, end) ranges;
            # normalise every entry to a (start, end) tuple, translating
            # service names (e.g. "ntp" -> 123) along the way so the
            # result stays consistent for the Capirca parser.
            ranges = []
            for entry in value:
                if isinstance(entry, (tuple, list)):
                    start, end = entry
                else:
                    # A bare port N is equivalent to the range N-N.
                    start = end = entry
                if not isinstance(start, int):
                    start = _translate_port(start)
                if not isinstance(end, int):
                    end = _translate_port(end)
                ranges.append((start, end))
            return list(set(previous + ranges))
        # Other iterable fields are merged as-is.
        return list(set(previous + list(value)))
    if port_field:
        # Scalar port: translate service names and wrap as a 1-port range.
        if not isinstance(value, int):
            value = _translate_port(value)
        return list(set(previous + [(value, value)]))
    # Any other scalar value is simply appended.
    return list(set(previous + [value]))
constant[
Return the object list.
]
variable[prev_value] assign[=] list[[]]
if compare[name[field_name] in name[dict_]] begin[:]
variable[prev_value] assign[=] call[name[dict_]][name[field_name]]
if compare[name[value] is constant[None]] begin[:]
return[name[prev_value]]
if compare[name[field_name] in tuple[[<ast.Constant object at 0x7da1b2344400>, <ast.Constant object at 0x7da1b2345f90>]]] begin[:]
if <ast.UnaryOp object at 0x7da1b2344430> begin[:]
variable[value] assign[=] call[name[_translate_port], parameter[name[value]]]
return[call[name[list], parameter[call[name[set], parameter[binary_operation[name[prev_value] + list[[<ast.Tuple object at 0x7da18bc70af0>]]]]]]]]
return[call[name[list], parameter[call[name[set], parameter[binary_operation[name[prev_value] + list[[<ast.Name object at 0x7da18bc70400>]]]]]]]] | keyword[def] identifier[_make_it_list] ( identifier[dict_] , identifier[field_name] , identifier[value] ):
literal[string]
identifier[prev_value] =[]
keyword[if] identifier[field_name] keyword[in] identifier[dict_] :
identifier[prev_value] = identifier[dict_] [ identifier[field_name] ]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] identifier[prev_value]
keyword[elif] identifier[isinstance] ( identifier[value] ,( identifier[tuple] , identifier[list] )):
keyword[if] identifier[field_name] keyword[in] ( literal[string] , literal[string] ):
identifier[portval] =[]
keyword[for] identifier[port] keyword[in] identifier[value] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[port] ,( identifier[tuple] , identifier[list] )):
identifier[portval] . identifier[append] (( identifier[port] , identifier[port] ))
keyword[else] :
identifier[portval] . identifier[append] ( identifier[port] )
identifier[translated_portval] =[]
keyword[for] identifier[port_start] , identifier[port_end] keyword[in] identifier[portval] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[port_start] , identifier[int] ):
identifier[port_start] = identifier[_translate_port] ( identifier[port_start] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[port_end] , identifier[int] ):
identifier[port_end] = identifier[_translate_port] ( identifier[port_end] )
identifier[translated_portval] . identifier[append] (
( identifier[port_start] , identifier[port_end] )
)
keyword[return] identifier[list] ( identifier[set] ( identifier[prev_value] + identifier[translated_portval] ))
keyword[return] identifier[list] ( identifier[set] ( identifier[prev_value] + identifier[list] ( identifier[value] )))
keyword[if] identifier[field_name] keyword[in] ( literal[string] , literal[string] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[int] ):
identifier[value] = identifier[_translate_port] ( identifier[value] )
keyword[return] identifier[list] ( identifier[set] ( identifier[prev_value] +[( identifier[value] , identifier[value] )]))
keyword[return] identifier[list] ( identifier[set] ( identifier[prev_value] +[ identifier[value] ])) | def _make_it_list(dict_, field_name, value):
"""
Return the object list.
"""
prev_value = []
# firsly we'll collect the prev value
if field_name in dict_:
prev_value = dict_[field_name] # depends on [control=['if'], data=['field_name', 'dict_']]
if value is None:
return prev_value # depends on [control=['if'], data=[]]
elif isinstance(value, (tuple, list)):
# other type of iterables
if field_name in ('source_port', 'destination_port'):
# port fields are more special
# they can either be a list of integers, either a list of tuples
# list of integers = a list of ports
# list of tuples = a list of ranges,
# e.g.: [(1000, 2000), (3000, 4000)] means the 1000-2000 and 3000-4000 ranges
portval = []
for port in value:
if not isinstance(port, (tuple, list)):
# to make sure everything is consistent,
# we'll transform indivitual ports into tuples
# thus an individual port e.g. 1000 will be transormed into the port range 1000-1000
# which is the equivalent
# but assures consistency for the Capirca parser
portval.append((port, port)) # depends on [control=['if'], data=[]]
else:
portval.append(port) # depends on [control=['for'], data=['port']]
translated_portval = []
# and the ports sent as string, e.g. ntp instead of 123
# needs to be translated
# again, using the same /etc/services
for (port_start, port_end) in portval:
if not isinstance(port_start, int):
port_start = _translate_port(port_start) # depends on [control=['if'], data=[]]
if not isinstance(port_end, int):
port_end = _translate_port(port_end) # depends on [control=['if'], data=[]]
translated_portval.append((port_start, port_end)) # depends on [control=['for'], data=[]]
return list(set(prev_value + translated_portval)) # depends on [control=['if'], data=[]]
return list(set(prev_value + list(value))) # depends on [control=['if'], data=[]]
if field_name in ('source_port', 'destination_port'):
if not isinstance(value, int):
value = _translate_port(value) # depends on [control=['if'], data=[]]
return list(set(prev_value + [(value, value)])) # a list of tuples # depends on [control=['if'], data=[]]
# anything else will be enclosed in a list-type
return list(set(prev_value + [value])) |
def get_time(self, loc4d=None):
    """
    Based on a Location4D object and this Diel object, calculate
    the time at which this Diel migration is actually happening.

    :param loc4d: Location4D providing the date and position to evaluate.
    :returns: the migration time (sun-cycle time with offset applied, or
        the stored specific time re-dated to loc4d's date).
    :raises ValueError: if ``loc4d`` is None.
    """
    if loc4d is None:
        raise ValueError("Location4D object can not be None")
    if self.pattern == self.PATTERN_CYCLE:
        # Anchor the migration to a sun cycle (sunrise/sunset) at the
        # given location and date.
        c = SunCycles.cycles(loc=loc4d)
        if self.cycle == self.CYCLE_SUNRISE:
            r = c[SunCycles.RISING]
        elif self.cycle == self.CYCLE_SUNSET:
            r = c[SunCycles.SETTING]
        # NOTE(review): if self.cycle is neither CYCLE_SUNRISE nor
        # CYCLE_SUNSET, 'r' is never bound and the arithmetic below
        # raises UnboundLocalError -- confirm whether that can happen.
        # Apply the configured offset (in hours) before/after the cycle.
        td = timedelta(hours=self.time_delta)
        if self.plus_or_minus == self.HOURS_PLUS:
            r = r + td
        elif self.plus_or_minus == self.HOURS_MINUS:
            r = r - td
        return r
    elif self.pattern == self.PATTERN_SPECIFICTIME:
        # Keep the stored time-of-day but move it onto loc4d's date.
        return self._time.replace(year=loc4d.time.year, month=loc4d.time.month, day=loc4d.time.day)
    # Any other pattern falls through and implicitly returns None.
constant[
Based on a Location4D object and this Diel object, calculate
the time at which this Diel migration is actually happening
]
if compare[name[loc4d] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f7202b0>
if compare[name[self].pattern equal[==] name[self].PATTERN_CYCLE] begin[:]
variable[c] assign[=] call[name[SunCycles].cycles, parameter[]]
if compare[name[self].cycle equal[==] name[self].CYCLE_SUNRISE] begin[:]
variable[r] assign[=] call[name[c]][name[SunCycles].RISING]
variable[td] assign[=] call[name[timedelta], parameter[]]
if compare[name[self].plus_or_minus equal[==] name[self].HOURS_PLUS] begin[:]
variable[r] assign[=] binary_operation[name[r] + name[td]]
return[name[r]] | keyword[def] identifier[get_time] ( identifier[self] , identifier[loc4d] = keyword[None] ):
literal[string]
keyword[if] identifier[loc4d] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[pattern] == identifier[self] . identifier[PATTERN_CYCLE] :
identifier[c] = identifier[SunCycles] . identifier[cycles] ( identifier[loc] = identifier[loc4d] )
keyword[if] identifier[self] . identifier[cycle] == identifier[self] . identifier[CYCLE_SUNRISE] :
identifier[r] = identifier[c] [ identifier[SunCycles] . identifier[RISING] ]
keyword[elif] identifier[self] . identifier[cycle] == identifier[self] . identifier[CYCLE_SUNSET] :
identifier[r] = identifier[c] [ identifier[SunCycles] . identifier[SETTING] ]
identifier[td] = identifier[timedelta] ( identifier[hours] = identifier[self] . identifier[time_delta] )
keyword[if] identifier[self] . identifier[plus_or_minus] == identifier[self] . identifier[HOURS_PLUS] :
identifier[r] = identifier[r] + identifier[td]
keyword[elif] identifier[self] . identifier[plus_or_minus] == identifier[self] . identifier[HOURS_MINUS] :
identifier[r] = identifier[r] - identifier[td]
keyword[return] identifier[r]
keyword[elif] identifier[self] . identifier[pattern] == identifier[self] . identifier[PATTERN_SPECIFICTIME] :
keyword[return] identifier[self] . identifier[_time] . identifier[replace] ( identifier[year] = identifier[loc4d] . identifier[time] . identifier[year] , identifier[month] = identifier[loc4d] . identifier[time] . identifier[month] , identifier[day] = identifier[loc4d] . identifier[time] . identifier[day] ) | def get_time(self, loc4d=None):
"""
Based on a Location4D object and this Diel object, calculate
the time at which this Diel migration is actually happening
"""
if loc4d is None:
raise ValueError('Location4D object can not be None') # depends on [control=['if'], data=[]]
if self.pattern == self.PATTERN_CYCLE:
c = SunCycles.cycles(loc=loc4d)
if self.cycle == self.CYCLE_SUNRISE:
r = c[SunCycles.RISING] # depends on [control=['if'], data=[]]
elif self.cycle == self.CYCLE_SUNSET:
r = c[SunCycles.SETTING] # depends on [control=['if'], data=[]]
td = timedelta(hours=self.time_delta)
if self.plus_or_minus == self.HOURS_PLUS:
r = r + td # depends on [control=['if'], data=[]]
elif self.plus_or_minus == self.HOURS_MINUS:
r = r - td # depends on [control=['if'], data=[]]
return r # depends on [control=['if'], data=[]]
elif self.pattern == self.PATTERN_SPECIFICTIME:
return self._time.replace(year=loc4d.time.year, month=loc4d.time.month, day=loc4d.time.day) # depends on [control=['if'], data=[]] |
def ensure_namespace(self, name):
    # type: (str) -> ApiNamespace
    """
    Return the namespace called *name*, creating it on first use.

    :param str name: Name of the namespace.
    :return ApiNamespace:
    """
    try:
        return self.namespaces[name]
    except KeyError:
        # First time this namespace is referenced: create and register it.
        namespace = ApiNamespace(name)
        self.namespaces[name] = namespace
        return namespace
constant[
Only creates a namespace if it hasn't yet been defined.
:param str name: Name of the namespace.
:return ApiNamespace:
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].namespaces] begin[:]
call[name[self].namespaces][name[name]] assign[=] call[name[ApiNamespace], parameter[name[name]]]
return[call[name[self].namespaces][name[name]]] | keyword[def] identifier[ensure_namespace] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[namespaces] :
identifier[self] . identifier[namespaces] [ identifier[name] ]= identifier[ApiNamespace] ( identifier[name] )
keyword[return] identifier[self] . identifier[namespaces] [ identifier[name] ] | def ensure_namespace(self, name):
# type: (str) -> ApiNamespace
"\n Only creates a namespace if it hasn't yet been defined.\n\n :param str name: Name of the namespace.\n\n :return ApiNamespace:\n "
if name not in self.namespaces:
self.namespaces[name] = ApiNamespace(name) # depends on [control=['if'], data=['name']]
return self.namespaces[name] |
def _resolve_device_type(self, device):
    """
    Given a device, determines if it is a CloudServer, a CloudLoadBalancer,
    or an invalid device.

    :returns: "server" or "loadbalancer"
    :raises exc.InvalidDeviceType: for any other object
    """
    try:
        # When the test suite is importable, its fake stand-ins must be
        # recognised alongside the real classes.
        from tests.unit import fakes
        server_types = (pyrax.CloudServer, fakes.FakeServer)
        lb_types = (CloudLoadBalancer, fakes.FakeLoadBalancer,
                fakes.FakeDNSDevice)
    except ImportError:
        # Not running with tests
        server_types = (pyrax.CloudServer, )
        lb_types = (CloudLoadBalancer, )
    if isinstance(device, server_types):
        return "server"
    if isinstance(device, lb_types):
        return "loadbalancer"
    raise exc.InvalidDeviceType("The device '%s' must be a CloudServer "
            "or a CloudLoadBalancer." % device)
constant[
Given a device, determines if it is a CloudServer, a CloudLoadBalancer,
or an invalid device.
]
<ast.Try object at 0x7da2054a5a50>
if call[name[isinstance], parameter[name[device], name[server_types]]] begin[:]
variable[device_type] assign[=] constant[server]
return[name[device_type]] | keyword[def] identifier[_resolve_device_type] ( identifier[self] , identifier[device] ):
literal[string]
keyword[try] :
keyword[from] identifier[tests] . identifier[unit] keyword[import] identifier[fakes]
identifier[server_types] =( identifier[pyrax] . identifier[CloudServer] , identifier[fakes] . identifier[FakeServer] )
identifier[lb_types] =( identifier[CloudLoadBalancer] , identifier[fakes] . identifier[FakeLoadBalancer] ,
identifier[fakes] . identifier[FakeDNSDevice] )
keyword[except] identifier[ImportError] :
identifier[server_types] =( identifier[pyrax] . identifier[CloudServer] ,)
identifier[lb_types] =( identifier[CloudLoadBalancer] ,)
keyword[if] identifier[isinstance] ( identifier[device] , identifier[server_types] ):
identifier[device_type] = literal[string]
keyword[elif] identifier[isinstance] ( identifier[device] , identifier[lb_types] ):
identifier[device_type] = literal[string]
keyword[else] :
keyword[raise] identifier[exc] . identifier[InvalidDeviceType] ( literal[string]
literal[string] % identifier[device] )
keyword[return] identifier[device_type] | def _resolve_device_type(self, device):
"""
Given a device, determines if it is a CloudServer, a CloudLoadBalancer,
or an invalid device.
"""
try:
from tests.unit import fakes
server_types = (pyrax.CloudServer, fakes.FakeServer)
lb_types = (CloudLoadBalancer, fakes.FakeLoadBalancer, fakes.FakeDNSDevice) # depends on [control=['try'], data=[]]
except ImportError:
# Not running with tests
server_types = (pyrax.CloudServer,)
lb_types = (CloudLoadBalancer,) # depends on [control=['except'], data=[]]
if isinstance(device, server_types):
device_type = 'server' # depends on [control=['if'], data=[]]
elif isinstance(device, lb_types):
device_type = 'loadbalancer' # depends on [control=['if'], data=[]]
else:
raise exc.InvalidDeviceType("The device '%s' must be a CloudServer or a CloudLoadBalancer." % device)
return device_type |
def new_array(state, element_type, size):
    """
    Allocates a new array in memory and returns the reference to the base.
    """
    # Bound the requested size first; the elements themselves are lazily
    # initialised by the javavm memory model, so only the base reference
    # needs to be created here.
    bounded = SimSootExpr_NewArray._bound_array_size(state, size)
    base_ref = SimSootValue_ArrayBaseRef(
        heap_alloc_id=state.javavm_memory.get_new_uuid(),
        element_type=element_type,
        size=bounded)
    return base_ref
constant[
Allocates a new array in memory and returns the reference to the base.
]
variable[size_bounded] assign[=] call[name[SimSootExpr_NewArray]._bound_array_size, parameter[name[state], name[size]]]
return[call[name[SimSootValue_ArrayBaseRef], parameter[]]] | keyword[def] identifier[new_array] ( identifier[state] , identifier[element_type] , identifier[size] ):
literal[string]
identifier[size_bounded] = identifier[SimSootExpr_NewArray] . identifier[_bound_array_size] ( identifier[state] , identifier[size] )
keyword[return] identifier[SimSootValue_ArrayBaseRef] ( identifier[heap_alloc_id] = identifier[state] . identifier[javavm_memory] . identifier[get_new_uuid] (),
identifier[element_type] = identifier[element_type] ,
identifier[size] = identifier[size_bounded] ) | def new_array(state, element_type, size):
"""
Allocates a new array in memory and returns the reference to the base.
"""
size_bounded = SimSootExpr_NewArray._bound_array_size(state, size)
# return the reference of the array base
# => elements getting lazy initialized in the javavm memory
return SimSootValue_ArrayBaseRef(heap_alloc_id=state.javavm_memory.get_new_uuid(), element_type=element_type, size=size_bounded) |
def _shade_remaining_missing(self):
    """Helper method to shade any missing labels remaining on the current
    page. Not intended for external use.

    Note that this will modify the internal _position attribute and should
    therefore only be used once all the 'real' labels have been drawn.
    """
    # Shading disabled -- nothing to do.
    if not self.shade_missing:
        return
    # Shade every position recorded as missing on the current page.
    for missing_position in self._used.get(self.page_count, set()):
        self._position = missing_position
        self._shade_missing_label()
constant[Helper method to shade any missing labels remaining on the current
page. Not intended for external use.
Note that this will modify the internal _position attribute and should
therefore only be used once all the 'real' labels have been drawn.
]
if <ast.UnaryOp object at 0x7da20c6e56c0> begin[:]
return[None]
variable[missing] assign[=] call[name[self]._used.get, parameter[name[self].page_count, call[name[set], parameter[]]]]
for taget[name[position]] in starred[name[missing]] begin[:]
name[self]._position assign[=] name[position]
call[name[self]._shade_missing_label, parameter[]] | keyword[def] identifier[_shade_remaining_missing] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[shade_missing] :
keyword[return]
identifier[missing] = identifier[self] . identifier[_used] . identifier[get] ( identifier[self] . identifier[page_count] , identifier[set] ())
keyword[for] identifier[position] keyword[in] identifier[missing] :
identifier[self] . identifier[_position] = identifier[position]
identifier[self] . identifier[_shade_missing_label] () | def _shade_remaining_missing(self):
"""Helper method to shade any missing labels remaining on the current
page. Not intended for external use.
Note that this will modify the internal _position attribute and should
therefore only be used once all the 'real' labels have been drawn.
"""
# Sanity check.
if not self.shade_missing:
return # depends on [control=['if'], data=[]]
# Run through each missing label left in the current page and shade it.
missing = self._used.get(self.page_count, set())
for position in missing:
self._position = position
self._shade_missing_label() # depends on [control=['for'], data=['position']] |
def get_or_none(cls, **filter_kwargs):
    """
    Return the video matching ``filter_kwargs``, or None when no such
    record exists.
    """
    try:
        return cls.objects.get(**filter_kwargs)
    except cls.DoesNotExist:
        return None
return video | def function[get_or_none, parameter[cls]]:
constant[
Returns a video or None.
]
<ast.Try object at 0x7da1b05fb490>
return[name[video]] | keyword[def] identifier[get_or_none] ( identifier[cls] ,** identifier[filter_kwargs] ):
literal[string]
keyword[try] :
identifier[video] = identifier[cls] . identifier[objects] . identifier[get] (** identifier[filter_kwargs] )
keyword[except] identifier[cls] . identifier[DoesNotExist] :
identifier[video] = keyword[None]
keyword[return] identifier[video] | def get_or_none(cls, **filter_kwargs):
"""
Returns a video or None.
"""
try:
video = cls.objects.get(**filter_kwargs) # depends on [control=['try'], data=[]]
except cls.DoesNotExist:
video = None # depends on [control=['except'], data=[]]
return video |
def profile(request):
    '''
    Get or set user profile.
    '''
    serializer_class = registration_settings.PROFILE_SERIALIZER_CLASS
    if request.method not in ('POST', 'PUT', 'PATCH'):
        # Read path (GET): just serialize the current user.
        serializer = serializer_class(instance=request.user)
        return Response(serializer.data)
    # Write path: validate the payload and persist it; PATCH permits a
    # partial update while POST/PUT require a complete one.
    serializer = serializer_class(
        instance=request.user,
        data=request.data,
        partial=(request.method == 'PATCH'),
    )
    serializer.is_valid(raise_exception=True)
    serializer.save()
    return Response(serializer.data)
constant[
Get or set user profile.
]
variable[serializer_class] assign[=] name[registration_settings].PROFILE_SERIALIZER_CLASS
if compare[name[request].method in list[[<ast.Constant object at 0x7da1b170ed40>, <ast.Constant object at 0x7da1b170e560>, <ast.Constant object at 0x7da1b170e080>]]] begin[:]
variable[partial] assign[=] compare[name[request].method equal[==] constant[PATCH]]
variable[serializer] assign[=] call[name[serializer_class], parameter[]]
call[name[serializer].is_valid, parameter[]]
call[name[serializer].save, parameter[]]
return[call[name[Response], parameter[name[serializer].data]]] | keyword[def] identifier[profile] ( identifier[request] ):
literal[string]
identifier[serializer_class] = identifier[registration_settings] . identifier[PROFILE_SERIALIZER_CLASS]
keyword[if] identifier[request] . identifier[method] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[partial] = identifier[request] . identifier[method] == literal[string]
identifier[serializer] = identifier[serializer_class] (
identifier[instance] = identifier[request] . identifier[user] ,
identifier[data] = identifier[request] . identifier[data] ,
identifier[partial] = identifier[partial] ,
)
identifier[serializer] . identifier[is_valid] ( identifier[raise_exception] = keyword[True] )
identifier[serializer] . identifier[save] ()
keyword[else] :
identifier[serializer] = identifier[serializer_class] ( identifier[instance] = identifier[request] . identifier[user] )
keyword[return] identifier[Response] ( identifier[serializer] . identifier[data] ) | def profile(request):
"""
Get or set user profile.
"""
serializer_class = registration_settings.PROFILE_SERIALIZER_CLASS
if request.method in ['POST', 'PUT', 'PATCH']:
partial = request.method == 'PATCH'
serializer = serializer_class(instance=request.user, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
serializer.save() # depends on [control=['if'], data=[]]
else: # request.method == 'GET':
serializer = serializer_class(instance=request.user)
return Response(serializer.data) |
def to_array(self):
    """
    Serializes this PreCheckoutQuery to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    data = super(PreCheckoutQuery, self).to_array()
    # Required fields are always serialized.
    data['id'] = u(self.id)  # py2: type unicode, py3: type str
    data['from'] = self.from_peer.to_array()  # type User
    data['currency'] = u(self.currency)  # py2: type unicode, py3: type str
    data['total_amount'] = int(self.total_amount)  # type int
    data['invoice_payload'] = u(self.invoice_payload)  # py2: type unicode, py3: type str
    # Optional fields are emitted only when they are set.
    if self.shipping_option_id is not None:
        data['shipping_option_id'] = u(self.shipping_option_id)  # py2: type unicode, py3: type str
    if self.order_info is not None:
        data['order_info'] = self.order_info.to_array()  # type OrderInfo
    return data
constant[
Serializes this PreCheckoutQuery to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
]
variable[array] assign[=] call[call[name[super], parameter[name[PreCheckoutQuery], name[self]]].to_array, parameter[]]
call[name[array]][constant[id]] assign[=] call[name[u], parameter[name[self].id]]
call[name[array]][constant[from]] assign[=] call[name[self].from_peer.to_array, parameter[]]
call[name[array]][constant[currency]] assign[=] call[name[u], parameter[name[self].currency]]
call[name[array]][constant[total_amount]] assign[=] call[name[int], parameter[name[self].total_amount]]
call[name[array]][constant[invoice_payload]] assign[=] call[name[u], parameter[name[self].invoice_payload]]
if compare[name[self].shipping_option_id is_not constant[None]] begin[:]
call[name[array]][constant[shipping_option_id]] assign[=] call[name[u], parameter[name[self].shipping_option_id]]
if compare[name[self].order_info is_not constant[None]] begin[:]
call[name[array]][constant[order_info]] assign[=] call[name[self].order_info.to_array, parameter[]]
return[name[array]] | keyword[def] identifier[to_array] ( identifier[self] ):
literal[string]
identifier[array] = identifier[super] ( identifier[PreCheckoutQuery] , identifier[self] ). identifier[to_array] ()
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[id] )
identifier[array] [ literal[string] ]= identifier[self] . identifier[from_peer] . identifier[to_array] ()
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[currency] )
identifier[array] [ literal[string] ]= identifier[int] ( identifier[self] . identifier[total_amount] )
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[invoice_payload] )
keyword[if] identifier[self] . identifier[shipping_option_id] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[shipping_option_id] )
keyword[if] identifier[self] . identifier[order_info] keyword[is] keyword[not] keyword[None] :
identifier[array] [ literal[string] ]= identifier[self] . identifier[order_info] . identifier[to_array] ()
keyword[return] identifier[array] | def to_array(self):
"""
Serializes this PreCheckoutQuery to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(PreCheckoutQuery, self).to_array()
array['id'] = u(self.id) # py2: type unicode, py3: type str
array['from'] = self.from_peer.to_array() # type User
array['currency'] = u(self.currency) # py2: type unicode, py3: type str
array['total_amount'] = int(self.total_amount) # type int
array['invoice_payload'] = u(self.invoice_payload) # py2: type unicode, py3: type str
if self.shipping_option_id is not None:
array['shipping_option_id'] = u(self.shipping_option_id) # py2: type unicode, py3: type str # depends on [control=['if'], data=[]]
if self.order_info is not None:
array['order_info'] = self.order_info.to_array() # type OrderInfo # depends on [control=['if'], data=[]]
return array |
def _joint_covariances(self):
  """Compute prior covariances for all variables via dynamic programming.

  Runs the Kalman covariance recursion forward from `initial_step`,
  without reference to any observed data: these are covariances of the
  *prior* (generative) model, not of a posterior.

  Returns:
    latent_covs: Prior covariance matrices of latent states `z_t`, as
      a `Tensor` of shape `batch_shape + [num_timesteps,
      latent_size, latent_size]`
    observation_covs: Prior covariance matrices of observations
      `x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
      observation_size, observation_size]`
  """
  with tf.name_scope("covariance_joint"):
    with tf.control_dependencies(self.runtime_assertions):
      # Broadcast the initial-state covariance against the model's batch
      # shape so the scan below carries a consistently-shaped accumulator.
      initial_latent_cov = _broadcast_to_shape(
          self.initial_state_prior.covariance(),
          tf.concat([self.batch_shape_tensor(),
                     [self.latent_size, self.latent_size]], axis=0))
      # Push the latent covariance through the observation model at the
      # initial step to get the first observation covariance.
      initial_observation_cov = _propagate_cov(
          initial_latent_cov,
          self.get_observation_matrix_for_timestep(self.initial_step),
          self.get_observation_noise_for_timestep(self.initial_step))
      cov_step = build_kalman_cov_step(
          self.get_transition_matrix_for_timestep,
          self.get_transition_noise_for_timestep,
          self.get_observation_matrix_for_timestep,
          self.get_observation_noise_for_timestep)
      # Scan over all timesteps following the initial step.
      # NOTE(review): `tf.range` excludes `final_step`, so the scan covers
      # steps `initial_step + 1 .. final_step - 1` -- `final_step` appears
      # to be exclusive here; confirm against the class's step convention.
      (latent_covs, observation_covs) = tf.scan(
          cov_step,
          elems=tf.range(self.initial_step+1, self.final_step),
          initializer=(initial_latent_cov, initial_observation_cov))
      # Squish the initial step back on top of the other (scanned) timesteps
      latent_covs = tf.concat([initial_latent_cov[tf.newaxis, ...],
                               latent_covs], axis=0)
      observation_covs = tf.concat([initial_observation_cov[tf.newaxis, ...],
                                    observation_covs], axis=0)
      # Put dimensions back in order. The samples we've computed have
      # shape `[num_timesteps, batch_shape, size, size]`, where `size`
      # is the dimension of the state or observation spaces
      # respectively, but we want to return values with shape
      # `[batch_shape, num_timesteps, size, size]`.
      latent_covs = distribution_util.move_dimension(latent_covs, 0, -3)
      observation_covs = distribution_util.move_dimension(
          observation_covs, 0, -3)
      return latent_covs, observation_covs
constant[Compute prior covariances for all variables via dynamic programming.
Returns:
latent_covs: Prior covariance matrices of latent states `z_t`, as
a `Tensor` of shape `batch_shape + [num_timesteps,
latent_size, latent_size]`
observation_covs: Prior covariance matrices of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size, observation_size]`
]
with call[name[tf].name_scope, parameter[constant[covariance_joint]]] begin[:]
with call[name[tf].control_dependencies, parameter[name[self].runtime_assertions]] begin[:]
variable[initial_latent_cov] assign[=] call[name[_broadcast_to_shape], parameter[call[name[self].initial_state_prior.covariance, parameter[]], call[name[tf].concat, parameter[list[[<ast.Call object at 0x7da1b03fa530>, <ast.List object at 0x7da1b03fb0a0>]]]]]]
variable[initial_observation_cov] assign[=] call[name[_propagate_cov], parameter[name[initial_latent_cov], call[name[self].get_observation_matrix_for_timestep, parameter[name[self].initial_step]], call[name[self].get_observation_noise_for_timestep, parameter[name[self].initial_step]]]]
variable[cov_step] assign[=] call[name[build_kalman_cov_step], parameter[name[self].get_transition_matrix_for_timestep, name[self].get_transition_noise_for_timestep, name[self].get_observation_matrix_for_timestep, name[self].get_observation_noise_for_timestep]]
<ast.Tuple object at 0x7da1b0211870> assign[=] call[name[tf].scan, parameter[name[cov_step]]]
variable[latent_covs] assign[=] call[name[tf].concat, parameter[list[[<ast.Subscript object at 0x7da1b0213250>, <ast.Name object at 0x7da1b0212650>]]]]
variable[observation_covs] assign[=] call[name[tf].concat, parameter[list[[<ast.Subscript object at 0x7da1b0213f10>, <ast.Name object at 0x7da1b02117b0>]]]]
variable[latent_covs] assign[=] call[name[distribution_util].move_dimension, parameter[name[latent_covs], constant[0], <ast.UnaryOp object at 0x7da1b0213070>]]
variable[observation_covs] assign[=] call[name[distribution_util].move_dimension, parameter[name[observation_covs], constant[0], <ast.UnaryOp object at 0x7da1b0211f00>]]
return[tuple[[<ast.Name object at 0x7da1b0213670>, <ast.Name object at 0x7da1b0211b70>]]] | keyword[def] identifier[_joint_covariances] ( identifier[self] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
keyword[with] identifier[tf] . identifier[control_dependencies] ( identifier[self] . identifier[runtime_assertions] ):
identifier[initial_latent_cov] = identifier[_broadcast_to_shape] (
identifier[self] . identifier[initial_state_prior] . identifier[covariance] (),
identifier[tf] . identifier[concat] ([ identifier[self] . identifier[batch_shape_tensor] (),
[ identifier[self] . identifier[latent_size] , identifier[self] . identifier[latent_size] ]], identifier[axis] = literal[int] ))
identifier[initial_observation_cov] = identifier[_propagate_cov] (
identifier[initial_latent_cov] ,
identifier[self] . identifier[get_observation_matrix_for_timestep] ( identifier[self] . identifier[initial_step] ),
identifier[self] . identifier[get_observation_noise_for_timestep] ( identifier[self] . identifier[initial_step] ))
identifier[cov_step] = identifier[build_kalman_cov_step] (
identifier[self] . identifier[get_transition_matrix_for_timestep] ,
identifier[self] . identifier[get_transition_noise_for_timestep] ,
identifier[self] . identifier[get_observation_matrix_for_timestep] ,
identifier[self] . identifier[get_observation_noise_for_timestep] )
( identifier[latent_covs] , identifier[observation_covs] )= identifier[tf] . identifier[scan] (
identifier[cov_step] ,
identifier[elems] = identifier[tf] . identifier[range] ( identifier[self] . identifier[initial_step] + literal[int] , identifier[self] . identifier[final_step] ),
identifier[initializer] =( identifier[initial_latent_cov] , identifier[initial_observation_cov] ))
identifier[latent_covs] = identifier[tf] . identifier[concat] ([ identifier[initial_latent_cov] [ identifier[tf] . identifier[newaxis] ,...],
identifier[latent_covs] ], identifier[axis] = literal[int] )
identifier[observation_covs] = identifier[tf] . identifier[concat] ([ identifier[initial_observation_cov] [ identifier[tf] . identifier[newaxis] ,...],
identifier[observation_covs] ], identifier[axis] = literal[int] )
identifier[latent_covs] = identifier[distribution_util] . identifier[move_dimension] ( identifier[latent_covs] , literal[int] ,- literal[int] )
identifier[observation_covs] = identifier[distribution_util] . identifier[move_dimension] (
identifier[observation_covs] , literal[int] ,- literal[int] )
keyword[return] identifier[latent_covs] , identifier[observation_covs] | def _joint_covariances(self):
"""Compute prior covariances for all variables via dynamic programming.
Returns:
latent_covs: Prior covariance matrices of latent states `z_t`, as
a `Tensor` of shape `batch_shape + [num_timesteps,
latent_size, latent_size]`
observation_covs: Prior covariance matrices of observations
`x_t`, as a `Tensor` of shape `batch_shape + [num_timesteps,
observation_size, observation_size]`
"""
with tf.name_scope('covariance_joint'):
with tf.control_dependencies(self.runtime_assertions):
initial_latent_cov = _broadcast_to_shape(self.initial_state_prior.covariance(), tf.concat([self.batch_shape_tensor(), [self.latent_size, self.latent_size]], axis=0)) # depends on [control=['with'], data=[]]
initial_observation_cov = _propagate_cov(initial_latent_cov, self.get_observation_matrix_for_timestep(self.initial_step), self.get_observation_noise_for_timestep(self.initial_step))
cov_step = build_kalman_cov_step(self.get_transition_matrix_for_timestep, self.get_transition_noise_for_timestep, self.get_observation_matrix_for_timestep, self.get_observation_noise_for_timestep)
# Scan over all timesteps following the initial step.
(latent_covs, observation_covs) = tf.scan(cov_step, elems=tf.range(self.initial_step + 1, self.final_step), initializer=(initial_latent_cov, initial_observation_cov))
# Squish the initial step back on top of the other (scanned) timesteps
latent_covs = tf.concat([initial_latent_cov[tf.newaxis, ...], latent_covs], axis=0)
observation_covs = tf.concat([initial_observation_cov[tf.newaxis, ...], observation_covs], axis=0)
# Put dimensions back in order. The samples we've computed have
# shape `[num_timesteps, batch_shape, size, size]`, where `size`
# is the dimension of the state or observation spaces
# respectively, but we want to return values with shape
# `[batch_shape, num_timesteps, size, size]`.
latent_covs = distribution_util.move_dimension(latent_covs, 0, -3)
observation_covs = distribution_util.move_dimension(observation_covs, 0, -3)
return (latent_covs, observation_covs) # depends on [control=['with'], data=[]] |
def delete(self):
    """ Delete
    Removes this file from the on-disk file system and drops its name
    from the owning directory's dictionary.

    :raises TypeError: if the owning container is not a ``Directory``.
    :raises OSError: propagated from ``os.remove`` if the file is
        already gone from disk (``FileNotFoundError`` on Python 3).
    """
    directory = self._directory()
    # Validate with an explicit raise rather than ``assert``: assertions
    # are stripped when Python runs with -O, which would silently skip
    # this check in optimized deployments.
    if not isinstance(directory, Directory):
        raise TypeError(
            'expected a Directory, got %r' % (type(directory).__name__,))
    real_path = self.realPath
    # No separate existence pre-check: ``os.remove`` already raises if the
    # path is missing, and checking first would be racy (TOCTOU) anyway.
    os.remove(real_path)
    directory._fileDeleted(self)
constant[ Delete
Deletes directory and drops the file name from dictionary. File on
file system removed on disk.
]
variable[directory] assign[=] call[name[self]._directory, parameter[]]
assert[call[name[isinstance], parameter[name[directory], name[Directory]]]]
variable[realPath] assign[=] name[self].realPath
assert[call[name[os].path.exists, parameter[name[realPath]]]]
call[name[os].remove, parameter[name[realPath]]]
call[name[directory]._fileDeleted, parameter[name[self]]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
identifier[directory] = identifier[self] . identifier[_directory] ()
keyword[assert] identifier[isinstance] ( identifier[directory] , identifier[Directory] )
identifier[realPath] = identifier[self] . identifier[realPath]
keyword[assert] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[realPath] ))
identifier[os] . identifier[remove] ( identifier[realPath] )
identifier[directory] . identifier[_fileDeleted] ( identifier[self] ) | def delete(self):
""" Delete
Deletes directory and drops the file name from dictionary. File on
file system removed on disk.
"""
directory = self._directory()
assert isinstance(directory, Directory)
realPath = self.realPath
assert os.path.exists(realPath)
os.remove(realPath)
directory._fileDeleted(self) |
def _validate_flux_unit(new_unit, wav_only=False):
    """Make sure flux unit is valid.

    Normalizes ``new_unit`` via :func:`units.validate_unit` and checks that
    its physical type is one of the accepted flux-density types.

    :raises exceptions.SynphotError: if the unit's physical type is not
        acceptable for a source spectrum.
    """
    new_unit = units.validate_unit(new_unit)
    ok_types = ['spectral flux density wav',
                'photon flux density wav']
    ok_names = ['PHOTLAM', 'FLAM']
    if not wav_only:
        # Also accept the per-Hz flavors.
        ok_types += ['spectral flux density',
                     'photon flux density']
        ok_names += ['PHOTNU', 'FNU', 'Jy']
    if new_unit.physical_type in ok_types:
        return new_unit
    raise exceptions.SynphotError(
        'Source spectrum cannot operate in {0}. Acceptable units: '
        '{1}'.format(new_unit, ','.join(ok_names)))
constant[Make sure flux unit is valid.]
variable[new_unit] assign[=] call[name[units].validate_unit, parameter[name[new_unit]]]
variable[acceptable_types] assign[=] list[[<ast.Constant object at 0x7da18f00d930>, <ast.Constant object at 0x7da18f00f220>]]
variable[acceptable_names] assign[=] list[[<ast.Constant object at 0x7da18f00d3c0>, <ast.Constant object at 0x7da18f00d420>]]
if <ast.UnaryOp object at 0x7da18f00e7d0> begin[:]
<ast.AugAssign object at 0x7da18f00cbe0>
<ast.AugAssign object at 0x7da18f00c100>
if compare[name[new_unit].physical_type <ast.NotIn object at 0x7da2590d7190> name[acceptable_types]] begin[:]
<ast.Raise object at 0x7da18f00e3b0>
return[name[new_unit]] | keyword[def] identifier[_validate_flux_unit] ( identifier[new_unit] , identifier[wav_only] = keyword[False] ):
literal[string]
identifier[new_unit] = identifier[units] . identifier[validate_unit] ( identifier[new_unit] )
identifier[acceptable_types] =[ literal[string] ,
literal[string] ]
identifier[acceptable_names] =[ literal[string] , literal[string] ]
keyword[if] keyword[not] identifier[wav_only] :
identifier[acceptable_types] +=[ literal[string] ,
literal[string] ]
identifier[acceptable_names] +=[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[new_unit] . identifier[physical_type] keyword[not] keyword[in] identifier[acceptable_types] :
keyword[raise] identifier[exceptions] . identifier[SynphotError] (
literal[string]
literal[string] . identifier[format] ( identifier[new_unit] , literal[string] . identifier[join] ( identifier[acceptable_names] )))
keyword[return] identifier[new_unit] | def _validate_flux_unit(new_unit, wav_only=False):
"""Make sure flux unit is valid."""
new_unit = units.validate_unit(new_unit)
acceptable_types = ['spectral flux density wav', 'photon flux density wav']
acceptable_names = ['PHOTLAM', 'FLAM']
if not wav_only: # Include per Hz units
acceptable_types += ['spectral flux density', 'photon flux density']
acceptable_names += ['PHOTNU', 'FNU', 'Jy'] # depends on [control=['if'], data=[]]
if new_unit.physical_type not in acceptable_types:
raise exceptions.SynphotError('Source spectrum cannot operate in {0}. Acceptable units: {1}'.format(new_unit, ','.join(acceptable_names))) # depends on [control=['if'], data=[]]
return new_unit |
def _HuntObjectFromRow(self, row):
    """Generates a flow object from a database row."""
    (create_time,
     last_update_time,
     creator,
     duration_micros,
     client_rate,
     client_limit,
     hunt_state,
     hunt_state_comment,
     init_start_time,
     last_start_time,
     num_clients_at_start_time,
     description,
     body) = row
    hunt_obj = rdf_hunt_objects.Hunt.FromSerializedString(body)
    hunt_obj.duration = rdfvalue.Duration.FromMicroseconds(duration_micros)
    hunt_obj.create_time = mysql_utils.TimestampToRDFDatetime(create_time)
    hunt_obj.last_update_time = mysql_utils.TimestampToRDFDatetime(
        last_update_time)
    # Convert the optional timestamp columns up front so the generic loop
    # below can treat every optional column uniformly.
    if init_start_time is not None:
      init_start_time = mysql_utils.TimestampToRDFDatetime(init_start_time)
    if last_start_time is not None:
      last_start_time = mysql_utils.TimestampToRDFDatetime(last_start_time)
    # Columns below may be NULL for hunts written to the database before the
    # respective fields became part of the schema; only overwrite the
    # deserialized defaults when a value is actually present.
    optional_fields = [
        ("creator", creator),
        ("client_rate", client_rate),
        ("client_limit", client_limit),
        ("hunt_state", hunt_state),
        ("hunt_state_comment", hunt_state_comment),
        ("init_start_time", init_start_time),
        ("last_start_time", last_start_time),
        ("num_clients_at_start_time", num_clients_at_start_time),
        ("description", description),
    ]
    for attr_name, value in optional_fields:
      if value is not None:
        setattr(hunt_obj, attr_name, value)
    return hunt_obj
constant[Generates a flow object from a database row.]
<ast.Tuple object at 0x7da1b1c1b0a0> assign[=] name[row]
variable[hunt_obj] assign[=] call[name[rdf_hunt_objects].Hunt.FromSerializedString, parameter[name[body]]]
name[hunt_obj].duration assign[=] call[name[rdfvalue].Duration.FromMicroseconds, parameter[name[duration_micros]]]
name[hunt_obj].create_time assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[create_time]]]
name[hunt_obj].last_update_time assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[last_update_time]]]
if compare[name[creator] is_not constant[None]] begin[:]
name[hunt_obj].creator assign[=] name[creator]
if compare[name[client_rate] is_not constant[None]] begin[:]
name[hunt_obj].client_rate assign[=] name[client_rate]
if compare[name[client_limit] is_not constant[None]] begin[:]
name[hunt_obj].client_limit assign[=] name[client_limit]
if compare[name[hunt_state] is_not constant[None]] begin[:]
name[hunt_obj].hunt_state assign[=] name[hunt_state]
if compare[name[hunt_state_comment] is_not constant[None]] begin[:]
name[hunt_obj].hunt_state_comment assign[=] name[hunt_state_comment]
if compare[name[init_start_time] is_not constant[None]] begin[:]
name[hunt_obj].init_start_time assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[init_start_time]]]
if compare[name[last_start_time] is_not constant[None]] begin[:]
name[hunt_obj].last_start_time assign[=] call[name[mysql_utils].TimestampToRDFDatetime, parameter[name[last_start_time]]]
if compare[name[num_clients_at_start_time] is_not constant[None]] begin[:]
name[hunt_obj].num_clients_at_start_time assign[=] name[num_clients_at_start_time]
if compare[name[description] is_not constant[None]] begin[:]
name[hunt_obj].description assign[=] name[description]
return[name[hunt_obj]] | keyword[def] identifier[_HuntObjectFromRow] ( identifier[self] , identifier[row] ):
literal[string]
(
identifier[create_time] ,
identifier[last_update_time] ,
identifier[creator] ,
identifier[duration_micros] ,
identifier[client_rate] ,
identifier[client_limit] ,
identifier[hunt_state] ,
identifier[hunt_state_comment] ,
identifier[init_start_time] ,
identifier[last_start_time] ,
identifier[num_clients_at_start_time] ,
identifier[description] ,
identifier[body] ,
)= identifier[row]
identifier[hunt_obj] = identifier[rdf_hunt_objects] . identifier[Hunt] . identifier[FromSerializedString] ( identifier[body] )
identifier[hunt_obj] . identifier[duration] = identifier[rdfvalue] . identifier[Duration] . identifier[FromMicroseconds] ( identifier[duration_micros] )
identifier[hunt_obj] . identifier[create_time] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] ( identifier[create_time] )
identifier[hunt_obj] . identifier[last_update_time] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] (
identifier[last_update_time] )
keyword[if] identifier[creator] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[creator] = identifier[creator]
keyword[if] identifier[client_rate] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[client_rate] = identifier[client_rate]
keyword[if] identifier[client_limit] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[client_limit] = identifier[client_limit]
keyword[if] identifier[hunt_state] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[hunt_state] = identifier[hunt_state]
keyword[if] identifier[hunt_state_comment] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[hunt_state_comment] = identifier[hunt_state_comment]
keyword[if] identifier[init_start_time] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[init_start_time] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] (
identifier[init_start_time] )
keyword[if] identifier[last_start_time] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[last_start_time] = identifier[mysql_utils] . identifier[TimestampToRDFDatetime] (
identifier[last_start_time] )
keyword[if] identifier[num_clients_at_start_time] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[num_clients_at_start_time] = identifier[num_clients_at_start_time]
keyword[if] identifier[description] keyword[is] keyword[not] keyword[None] :
identifier[hunt_obj] . identifier[description] = identifier[description]
keyword[return] identifier[hunt_obj] | def _HuntObjectFromRow(self, row):
"""Generates a flow object from a database row."""
(create_time, last_update_time, creator, duration_micros, client_rate, client_limit, hunt_state, hunt_state_comment, init_start_time, last_start_time, num_clients_at_start_time, description, body) = row
hunt_obj = rdf_hunt_objects.Hunt.FromSerializedString(body)
hunt_obj.duration = rdfvalue.Duration.FromMicroseconds(duration_micros)
hunt_obj.create_time = mysql_utils.TimestampToRDFDatetime(create_time)
hunt_obj.last_update_time = mysql_utils.TimestampToRDFDatetime(last_update_time)
# Checks below are needed for hunts that were written to the database before
# respective fields became part of F1 schema.
if creator is not None:
hunt_obj.creator = creator # depends on [control=['if'], data=['creator']]
if client_rate is not None:
hunt_obj.client_rate = client_rate # depends on [control=['if'], data=['client_rate']]
if client_limit is not None:
hunt_obj.client_limit = client_limit # depends on [control=['if'], data=['client_limit']]
if hunt_state is not None:
hunt_obj.hunt_state = hunt_state # depends on [control=['if'], data=['hunt_state']]
if hunt_state_comment is not None:
hunt_obj.hunt_state_comment = hunt_state_comment # depends on [control=['if'], data=['hunt_state_comment']]
if init_start_time is not None:
hunt_obj.init_start_time = mysql_utils.TimestampToRDFDatetime(init_start_time) # depends on [control=['if'], data=['init_start_time']]
if last_start_time is not None:
hunt_obj.last_start_time = mysql_utils.TimestampToRDFDatetime(last_start_time) # depends on [control=['if'], data=['last_start_time']]
if num_clients_at_start_time is not None:
hunt_obj.num_clients_at_start_time = num_clients_at_start_time # depends on [control=['if'], data=['num_clients_at_start_time']]
if description is not None:
hunt_obj.description = description # depends on [control=['if'], data=['description']]
return hunt_obj |
def destroy(name, call=None):
    """
    This function irreversibly destroys a virtual machine on the cloud provider.
    Before doing so, it should fire an event on the Salt event bus.
    The tag for this event is `salt/cloud/<vm name>/destroying`.
    Once the virtual machine has been destroyed, another event is fired.
    The tag for that event is `salt/cloud/<vm name>/destroyed`.
    Dependencies:
        list_nodes
    @param name: name of the machine to destroy
    @type name: str
    @param call:
    @type call:
    @return: True if all went well, otherwise an error message
    @rtype: bool|str
    """
    log.info("Attempting to delete instance %s", name)
    if not vb_machine_exists(name):
        return "{0} doesn't exist and can't be deleted".format(name)

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    vb_destroy_machine(name)

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # BUG FIX: the docstring promises True on success, but the function
    # previously fell off the end and returned None (which is falsy),
    # making success indistinguishable from "nothing happened" to callers
    # that truth-test the result.
    return True
constant[
This function irreversibly destroys a virtual machine on the cloud provider.
Before doing so, it should fire an event on the Salt event bus.
The tag for this event is `salt/cloud/<vm name>/destroying`.
Once the virtual machine has been destroyed, another event is fired.
The tag for that event is `salt/cloud/<vm name>/destroyed`.
Dependencies:
list_nodes
@param name:
@type name: str
@param call:
@type call:
@return: True if all went well, otherwise an error message
@rtype: bool|str
]
call[name[log].info, parameter[constant[Attempting to delete instance %s], name[name]]]
if <ast.UnaryOp object at 0x7da1b2346650> begin[:]
return[call[constant[{0} doesn't exist and can't be deleted].format, parameter[name[name]]]]
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[destroying instance], call[constant[salt/cloud/{0}/destroying].format, parameter[name[name]]]]]
call[name[vb_destroy_machine], parameter[name[name]]]
call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[destroyed instance], call[constant[salt/cloud/{0}/destroyed].format, parameter[name[name]]]]] | keyword[def] identifier[destroy] ( identifier[name] , identifier[call] = keyword[None] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] , identifier[name] )
keyword[if] keyword[not] identifier[vb_machine_exists] ( identifier[name] ):
keyword[return] literal[string] . identifier[format] ( identifier[name] )
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[name] ),
identifier[args] ={ literal[string] : identifier[name] },
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
)
identifier[vb_destroy_machine] ( identifier[name] )
identifier[__utils__] [ literal[string] ](
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[name] ),
identifier[args] ={ literal[string] : identifier[name] },
identifier[sock_dir] = identifier[__opts__] [ literal[string] ],
identifier[transport] = identifier[__opts__] [ literal[string] ]
) | def destroy(name, call=None):
"""
This function irreversibly destroys a virtual machine on the cloud provider.
Before doing so, it should fire an event on the Salt event bus.
The tag for this event is `salt/cloud/<vm name>/destroying`.
Once the virtual machine has been destroyed, another event is fired.
The tag for that event is `salt/cloud/<vm name>/destroyed`.
Dependencies:
list_nodes
@param name:
@type name: str
@param call:
@type call:
@return: True if all went well, otherwise an error message
@rtype: bool|str
"""
log.info('Attempting to delete instance %s', name)
if not vb_machine_exists(name):
return "{0} doesn't exist and can't be deleted".format(name) # depends on [control=['if'], data=[]]
__utils__['cloud.fire_event']('event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
vb_destroy_machine(name)
__utils__['cloud.fire_event']('event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) |
def children(self, node_parent):
    """!
    @brief Yields existing children of a node.

    @param[in] node_parent (node): Node whose children are required.

    @return (generator) Yields the left child, then the right child,
             skipping whichever is None; yields nothing for a leaf node.
    """
    for child in (node_parent.left, node_parent.right):
        if child is not None:
            yield child
constant[!
@brief Returns list of children of node.
@param[in] node_parent (node): Node whose children are required.
@return (list) Children of node. If node haven't got any child then None is returned.
]
if compare[name[node_parent].left is_not constant[None]] begin[:]
<ast.Yield object at 0x7da20e955180>
if compare[name[node_parent].right is_not constant[None]] begin[:]
<ast.Yield object at 0x7da1b014e350> | keyword[def] identifier[children] ( identifier[self] , identifier[node_parent] ):
literal[string]
keyword[if] identifier[node_parent] . identifier[left] keyword[is] keyword[not] keyword[None] :
keyword[yield] identifier[node_parent] . identifier[left]
keyword[if] identifier[node_parent] . identifier[right] keyword[is] keyword[not] keyword[None] :
keyword[yield] identifier[node_parent] . identifier[right] | def children(self, node_parent):
"""!
@brief Returns list of children of node.
@param[in] node_parent (node): Node whose children are required.
@return (list) Children of node. If node haven't got any child then None is returned.
"""
if node_parent.left is not None:
yield node_parent.left # depends on [control=['if'], data=[]]
if node_parent.right is not None:
yield node_parent.right # depends on [control=['if'], data=[]] |
def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None,
           valid_length=None):
    """Unrolls an RNN cell across time steps.

    Parameters
    ----------
    length : int
        Number of steps to unroll.
    inputs : Symbol, list of Symbol, or None
        Either a single Symbol of shape (batch_size, length, ...) when
        `layout` is 'NTC' (or (length, batch_size, ...) when 'TNC'),
        or a list of per-step Symbols, each of shape (batch_size, ...).
    begin_state : nested list of Symbol, optional
        Initial states, e.g. created by `begin_state()` or taken from the
        output state of another cell. Defaults to `begin_state()` when None.
    layout : str, optional
        Layout of the input symbol; only used when `inputs` is a single
        Symbol.
    merge_outputs : bool, optional
        If False, return outputs as a list of per-step Symbols. If True,
        concatenate across time into one Symbol shaped according to
        `layout`. If None, output whatever is faster.
    valid_length : Symbol, NDArray or None
        Per-sequence valid lengths, shape (batch_size,), for batches padded
        to a common length. When given, the returned states are those at
        each sequence's last valid step and padded output positions are
        masked with 0. Must be smaller than or equal to `length`. When
        None, all sequences are assumed to have the same length.

    Returns
    -------
    outputs : list of Symbol or Symbol
        Unrolled outputs, merged or per-step according to `merge_outputs`.
    states : list of Symbol
        The new state of this RNN after unrolling, with the same structure
        as the output of `begin_state()`.
    """
    # pylint: disable=too-many-locals
    self.reset()

    inputs, axis, F, batch_size = _format_sequence(length, inputs, layout, False)
    states = _get_begin_state(self, F, begin_state, inputs, batch_size)

    outputs = []
    states_per_step = []
    for step in range(length):
        step_out, states = self(inputs[step], states)
        outputs.append(step_out)
        if valid_length is not None:
            states_per_step.append(states)

    if valid_length is not None:
        # For each state tensor, stack its per-step values along a new time
        # axis and pick the value at each sequence's last valid step.
        selected = []
        for state_history in zip(*states_per_step):
            selected.append(F.SequenceLast(F.stack(*state_history, axis=0),
                                           sequence_length=valid_length,
                                           use_sequence_length=True,
                                           axis=0))
        states = selected
        # Zero out output positions that lie beyond each sequence's length.
        outputs = _mask_sequence_variable_length(F, outputs, length, valid_length,
                                                 axis, True)

    outputs, _, _, _ = _format_sequence(length, outputs, layout, merge_outputs)
    return outputs, states
constant[Unrolls an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if `layout` is 'NTC',
or (length, batch_size, ...) if `layout` is 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, optional
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if `None`.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If `False`, returns outputs as a list of Symbols.
If `True`, concatenates output across time steps
and returns a single symbol with shape
(batch_size, length, ...) if layout is 'NTC',
or (length, batch_size, ...) if layout is 'TNC'.
If `None`, output whatever is faster.
valid_length : Symbol, NDArray or None
`valid_length` specifies the length of the sequences in the batch without padding.
This option is especially useful for building sequence-to-sequence models where
the input and output sequences would potentially be padded.
If `valid_length` is None, all sequences are assumed to have the same length.
If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,).
The ith element will be the length of the ith sequence in the batch.
The last valid state will be return and the padded outputs will be masked with 0.
Note that `valid_length` must be smaller or equal to `length`.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
]
call[name[self].reset, parameter[]]
<ast.Tuple object at 0x7da1b204eec0> assign[=] call[name[_format_sequence], parameter[name[length], name[inputs], name[layout], constant[False]]]
variable[begin_state] assign[=] call[name[_get_begin_state], parameter[name[self], name[F], name[begin_state], name[inputs], name[batch_size]]]
variable[states] assign[=] name[begin_state]
variable[outputs] assign[=] list[[]]
variable[all_states] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[name[length]]]] begin[:]
<ast.Tuple object at 0x7da1b204ff10> assign[=] call[name[self], parameter[call[name[inputs]][name[i]], name[states]]]
call[name[outputs].append, parameter[name[output]]]
if compare[name[valid_length] is_not constant[None]] begin[:]
call[name[all_states].append, parameter[name[states]]]
if compare[name[valid_length] is_not constant[None]] begin[:]
variable[states] assign[=] <ast.ListComp object at 0x7da1b20660b0>
variable[outputs] assign[=] call[name[_mask_sequence_variable_length], parameter[name[F], name[outputs], name[length], name[valid_length], name[axis], constant[True]]]
<ast.Tuple object at 0x7da1b2067f70> assign[=] call[name[_format_sequence], parameter[name[length], name[outputs], name[layout], name[merge_outputs]]]
return[tuple[[<ast.Name object at 0x7da1b204e2f0>, <ast.Name object at 0x7da1b204cbe0>]]] | keyword[def] identifier[unroll] ( identifier[self] , identifier[length] , identifier[inputs] , identifier[begin_state] = keyword[None] , identifier[layout] = literal[string] , identifier[merge_outputs] = keyword[None] ,
identifier[valid_length] = keyword[None] ):
literal[string]
identifier[self] . identifier[reset] ()
identifier[inputs] , identifier[axis] , identifier[F] , identifier[batch_size] = identifier[_format_sequence] ( identifier[length] , identifier[inputs] , identifier[layout] , keyword[False] )
identifier[begin_state] = identifier[_get_begin_state] ( identifier[self] , identifier[F] , identifier[begin_state] , identifier[inputs] , identifier[batch_size] )
identifier[states] = identifier[begin_state]
identifier[outputs] =[]
identifier[all_states] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[length] ):
identifier[output] , identifier[states] = identifier[self] ( identifier[inputs] [ identifier[i] ], identifier[states] )
identifier[outputs] . identifier[append] ( identifier[output] )
keyword[if] identifier[valid_length] keyword[is] keyword[not] keyword[None] :
identifier[all_states] . identifier[append] ( identifier[states] )
keyword[if] identifier[valid_length] keyword[is] keyword[not] keyword[None] :
identifier[states] =[ identifier[F] . identifier[SequenceLast] ( identifier[F] . identifier[stack] (* identifier[ele_list] , identifier[axis] = literal[int] ),
identifier[sequence_length] = identifier[valid_length] ,
identifier[use_sequence_length] = keyword[True] ,
identifier[axis] = literal[int] )
keyword[for] identifier[ele_list] keyword[in] identifier[zip] (* identifier[all_states] )]
identifier[outputs] = identifier[_mask_sequence_variable_length] ( identifier[F] , identifier[outputs] , identifier[length] , identifier[valid_length] , identifier[axis] , keyword[True] )
identifier[outputs] , identifier[_] , identifier[_] , identifier[_] = identifier[_format_sequence] ( identifier[length] , identifier[outputs] , identifier[layout] , identifier[merge_outputs] )
keyword[return] identifier[outputs] , identifier[states] | def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None, valid_length=None):
"""Unrolls an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if `layout` is 'NTC',
or (length, batch_size, ...) if `layout` is 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, optional
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if `None`.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If `False`, returns outputs as a list of Symbols.
If `True`, concatenates output across time steps
and returns a single symbol with shape
(batch_size, length, ...) if layout is 'NTC',
or (length, batch_size, ...) if layout is 'TNC'.
If `None`, output whatever is faster.
valid_length : Symbol, NDArray or None
`valid_length` specifies the length of the sequences in the batch without padding.
This option is especially useful for building sequence-to-sequence models where
the input and output sequences would potentially be padded.
If `valid_length` is None, all sequences are assumed to have the same length.
If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,).
The ith element will be the length of the ith sequence in the batch.
The last valid state will be return and the padded outputs will be masked with 0.
Note that `valid_length` must be smaller or equal to `length`.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
"""
# pylint: disable=too-many-locals
self.reset()
(inputs, axis, F, batch_size) = _format_sequence(length, inputs, layout, False)
begin_state = _get_begin_state(self, F, begin_state, inputs, batch_size)
states = begin_state
outputs = []
all_states = []
for i in range(length):
(output, states) = self(inputs[i], states)
outputs.append(output)
if valid_length is not None:
all_states.append(states) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if valid_length is not None:
states = [F.SequenceLast(F.stack(*ele_list, axis=0), sequence_length=valid_length, use_sequence_length=True, axis=0) for ele_list in zip(*all_states)]
outputs = _mask_sequence_variable_length(F, outputs, length, valid_length, axis, True) # depends on [control=['if'], data=['valid_length']]
(outputs, _, _, _) = _format_sequence(length, outputs, layout, merge_outputs)
return (outputs, states) |
def check_read_permission(self, user_id, do_raise=True):
    """
    Check whether this user can read this network.

    Admins and the network's creator always have read access; otherwise
    at least one owner entry for this user must grant view ('Y')
    permission. Returns True when access is granted; otherwise raises
    PermissionError when `do_raise` is True, or returns False.
    """
    if _is_admin(user_id):
        return True
    uid = int(user_id)
    if int(self.created_by) == uid:
        return True
    for entry in self.owners:
        if int(entry.user_id) == uid and entry.view == 'Y':
            return True
    if do_raise is True:
        raise PermissionError("Permission denied. User %s does not have read"
                              " access on network %s" %
                              (user_id, self.id))
    return False
constant[
Check whether this user can read this network
]
if call[name[_is_admin], parameter[name[user_id]]] begin[:]
return[constant[True]]
if compare[call[name[int], parameter[name[self].created_by]] equal[==] call[name[int], parameter[name[user_id]]]] begin[:]
return[constant[True]]
for taget[name[owner]] in starred[name[self].owners] begin[:]
if compare[call[name[int], parameter[name[owner].user_id]] equal[==] call[name[int], parameter[name[user_id]]]] begin[:]
if compare[name[owner].view equal[==] constant[Y]] begin[:]
break
return[constant[True]] | keyword[def] identifier[check_read_permission] ( identifier[self] , identifier[user_id] , identifier[do_raise] = keyword[True] ):
literal[string]
keyword[if] identifier[_is_admin] ( identifier[user_id] ):
keyword[return] keyword[True]
keyword[if] identifier[int] ( identifier[self] . identifier[created_by] )== identifier[int] ( identifier[user_id] ):
keyword[return] keyword[True]
keyword[for] identifier[owner] keyword[in] identifier[self] . identifier[owners] :
keyword[if] identifier[int] ( identifier[owner] . identifier[user_id] )== identifier[int] ( identifier[user_id] ):
keyword[if] identifier[owner] . identifier[view] == literal[string] :
keyword[break]
keyword[else] :
keyword[if] identifier[do_raise] keyword[is] keyword[True] :
keyword[raise] identifier[PermissionError] ( literal[string]
literal[string] %
( identifier[user_id] , identifier[self] . identifier[id] ))
keyword[else] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def check_read_permission(self, user_id, do_raise=True):
"""
Check whether this user can read this network
"""
if _is_admin(user_id):
return True # depends on [control=['if'], data=[]]
if int(self.created_by) == int(user_id):
return True # depends on [control=['if'], data=[]]
for owner in self.owners:
if int(owner.user_id) == int(user_id):
if owner.view == 'Y':
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['owner']]
else:
if do_raise is True:
raise PermissionError('Permission denied. User %s does not have read access on network %s' % (user_id, self.id)) # depends on [control=['if'], data=[]]
else:
return False
return True |
def _X_selected(X, selected):
"""Split X into selected features and other features"""
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
non_sel = np.logical_not(sel)
n_selected = np.sum(sel)
X_sel = X[:, ind[sel]]
X_not_sel = X[:, ind[non_sel]]
return X_sel, X_not_sel, n_selected, n_features | def function[_X_selected, parameter[X, selected]]:
constant[Split X into selected features and other features]
variable[n_features] assign[=] call[name[X].shape][constant[1]]
variable[ind] assign[=] call[name[np].arange, parameter[name[n_features]]]
variable[sel] assign[=] call[name[np].zeros, parameter[name[n_features]]]
call[name[sel]][call[name[np].asarray, parameter[name[selected]]]] assign[=] constant[True]
variable[non_sel] assign[=] call[name[np].logical_not, parameter[name[sel]]]
variable[n_selected] assign[=] call[name[np].sum, parameter[name[sel]]]
variable[X_sel] assign[=] call[name[X]][tuple[[<ast.Slice object at 0x7da1b23463b0>, <ast.Subscript object at 0x7da1b2344910>]]]
variable[X_not_sel] assign[=] call[name[X]][tuple[[<ast.Slice object at 0x7da1b2345450>, <ast.Subscript object at 0x7da1b2344a90>]]]
return[tuple[[<ast.Name object at 0x7da1b23479d0>, <ast.Name object at 0x7da1b23468c0>, <ast.Name object at 0x7da1b2344af0>, <ast.Name object at 0x7da1b2344460>]]] | keyword[def] identifier[_X_selected] ( identifier[X] , identifier[selected] ):
literal[string]
identifier[n_features] = identifier[X] . identifier[shape] [ literal[int] ]
identifier[ind] = identifier[np] . identifier[arange] ( identifier[n_features] )
identifier[sel] = identifier[np] . identifier[zeros] ( identifier[n_features] , identifier[dtype] = identifier[bool] )
identifier[sel] [ identifier[np] . identifier[asarray] ( identifier[selected] )]= keyword[True]
identifier[non_sel] = identifier[np] . identifier[logical_not] ( identifier[sel] )
identifier[n_selected] = identifier[np] . identifier[sum] ( identifier[sel] )
identifier[X_sel] = identifier[X] [:, identifier[ind] [ identifier[sel] ]]
identifier[X_not_sel] = identifier[X] [:, identifier[ind] [ identifier[non_sel] ]]
keyword[return] identifier[X_sel] , identifier[X_not_sel] , identifier[n_selected] , identifier[n_features] | def _X_selected(X, selected):
"""Split X into selected features and other features"""
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
non_sel = np.logical_not(sel)
n_selected = np.sum(sel)
X_sel = X[:, ind[sel]]
X_not_sel = X[:, ind[non_sel]]
return (X_sel, X_not_sel, n_selected, n_features) |
async def create(cls, destination: Union[int, Subnet],
                 source: Union[int, Subnet], gateway_ip: str, metric: int):
    """
    Create a `StaticRoute` in MAAS.

    :param destination: The destination subnet of the route, given either
        as a `Subnet` object or as its integer ID.
    :type destination: `Union[int, Subnet]`
    :param source: The source subnet the route applies to, given either
        as a `Subnet` object or as its integer ID.
    :type source: `Union[int, Subnet]`
    :param gateway_ip: The IP address of the gateway for this route.
    :type gateway_ip: `str`
    :param metric: The metric (cost) of the route.
    :type metric: `int`
    :returns: The created StaticRoute
    :rtype: `StaticRoute`
    """
    params = {
        "gateway_ip": gateway_ip,
        "metric": metric,
    }
    # Accept either a Subnet object (use its ID) or a raw integer ID for
    # each endpoint; any other type is silently omitted from the request.
    if isinstance(source, Subnet):
        params["source"] = source.id
    elif isinstance(source, int):
        params["source"] = source
    if isinstance(destination, Subnet):
        params["destination"] = destination.id
    elif isinstance(destination, int):
        params["destination"] = destination
    return cls._object(await cls._handler.create(**params))
identifier[source] : identifier[Union] [ identifier[int] , identifier[Subnet] ], identifier[gateway_ip] : identifier[str] , identifier[metric] : identifier[int] ):
literal[string]
identifier[params] ={
literal[string] : identifier[gateway_ip] ,
literal[string] : identifier[metric] ,
}
keyword[if] identifier[isinstance] ( identifier[source] , identifier[Subnet] ):
identifier[params] [ literal[string] ]= identifier[source] . identifier[id]
keyword[elif] identifier[isinstance] ( identifier[source] , identifier[int] ):
identifier[params] [ literal[string] ]= identifier[source]
keyword[if] identifier[isinstance] ( identifier[destination] , identifier[Subnet] ):
identifier[params] [ literal[string] ]= identifier[destination] . identifier[id]
keyword[elif] identifier[isinstance] ( identifier[destination] , identifier[int] ):
identifier[params] [ literal[string] ]= identifier[destination]
keyword[return] identifier[cls] . identifier[_object] ( keyword[await] identifier[cls] . identifier[_handler] . identifier[create] (** identifier[params] )) | async def create(cls, destination: Union[int, Subnet], source: Union[int, Subnet], gateway_ip: str, metric: int):
"""
Create a `StaticRoute` in MAAS.
:param name: The name of the `StaticRoute` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `StaticRoute` (optional).
:type description: `str`
:param class_type: The class type of the `StaticRoute` (optional).
:type class_type: `str`
:returns: The created StaticRoute
:rtype: `StaticRoute`
"""
params = {'gateway_ip': gateway_ip, 'metric': metric}
if isinstance(source, Subnet):
params['source'] = source.id # depends on [control=['if'], data=[]]
elif isinstance(source, int):
params['source'] = source # depends on [control=['if'], data=[]]
if isinstance(destination, Subnet):
params['destination'] = destination.id # depends on [control=['if'], data=[]]
elif isinstance(destination, int):
params['destination'] = destination # depends on [control=['if'], data=[]]
return cls._object(await cls._handler.create(**params)) |
def validate_files(self, file_list, validator_image, original_validation_id="", environment=None):
    """
    Invoke supplied validator Docker image and give it access to the file/s.

    The validator must be based off the base validator Docker image.

    :param list file_list: A list of files within the Upload Area to be validated
    :param str validator_image: the location of a docker image to use for validation
    :param str original_validation_id: [optional]
    :param dict environment: [optional] mapping of environment variables to
        set for the validator; defaults to an empty mapping
    :return: ID of scheduled validation
    :rtype: dict
    :raises UploadApiException: if information could not be obtained
    """
    # Avoid a mutable default argument: fall back to a fresh dict per call.
    if environment is None:
        environment = {}
    return self.upload_service.api_client.validate_files(area_uuid=self.uuid,
                                                         file_list=file_list,
                                                         validator_image=validator_image,
                                                         original_validation_id=original_validation_id,
                                                         environment=environment)
constant[
Invoke supplied validator Docker image and give it access to the file/s.
The validator must be based off the base validator Docker image.
:param list file_list: A list of files within the Upload Area to be validated
:param str validator_image: the location of a docker image to use for validation
:param str original_validation_id: [optional]
:param dict environment: [optional] list of environment variable to set for the validator
:return: ID of scheduled validation
:rtype: dict
:raises UploadApiException: if information could not be obtained
]
return[call[name[self].upload_service.api_client.validate_files, parameter[]]] | keyword[def] identifier[validate_files] ( identifier[self] , identifier[file_list] , identifier[validator_image] , identifier[original_validation_id] = literal[string] , identifier[environment] ={}):
literal[string]
keyword[return] identifier[self] . identifier[upload_service] . identifier[api_client] . identifier[validate_files] ( identifier[area_uuid] = identifier[self] . identifier[uuid] ,
identifier[file_list] = identifier[file_list] ,
identifier[validator_image] = identifier[validator_image] ,
identifier[original_validation_id] = identifier[original_validation_id] ,
identifier[environment] = identifier[environment] ) | def validate_files(self, file_list, validator_image, original_validation_id='', environment={}):
"""
Invoke supplied validator Docker image and give it access to the file/s.
The validator must be based off the base validator Docker image.
:param list file_list: A list of files within the Upload Area to be validated
:param str validator_image: the location of a docker image to use for validation
:param str original_validation_id: [optional]
:param dict environment: [optional] list of environment variable to set for the validator
:return: ID of scheduled validation
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
return self.upload_service.api_client.validate_files(area_uuid=self.uuid, file_list=file_list, validator_image=validator_image, original_validation_id=original_validation_id, environment=environment) |
def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol):
    '''
    Return the dict representation of the DVS link discovery protocol.

    dvs_name
        The name of the DVS

    dvs_link_disc_protocol
        The DVS link discovery protocol object
    '''
    log.trace('Building the dict of the DVS \'%s\' link discovery protocol',
              dvs_name)
    return dict(operation=dvs_link_disc_protocol.operation,
                protocol=dvs_link_disc_protocol.protocol)
constant[
Returns the dict representation of the DVS link discovery protocol
dvs_name
The name of the DVS
dvs_link_disc_protocl
The DVS link discovery protocol
]
call[name[log].trace, parameter[constant[Building the dict of the DVS '%s' link discovery protocol], name[dvs_name]]]
return[dictionary[[<ast.Constant object at 0x7da2041d88b0>, <ast.Constant object at 0x7da2041d83d0>], [<ast.Attribute object at 0x7da2041db100>, <ast.Attribute object at 0x7da2041db6a0>]]] | keyword[def] identifier[_get_dvs_link_discovery_protocol] ( identifier[dvs_name] , identifier[dvs_link_disc_protocol] ):
literal[string]
identifier[log] . identifier[trace] ( literal[string] , identifier[dvs_name] )
keyword[return] { literal[string] : identifier[dvs_link_disc_protocol] . identifier[operation] ,
literal[string] : identifier[dvs_link_disc_protocol] . identifier[protocol] } | def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol):
"""
Returns the dict representation of the DVS link discovery protocol
dvs_name
The name of the DVS
dvs_link_disc_protocl
The DVS link discovery protocol
"""
log.trace("Building the dict of the DVS '%s' link discovery protocol", dvs_name)
return {'operation': dvs_link_disc_protocol.operation, 'protocol': dvs_link_disc_protocol.protocol} |
def get_data_length(self):
    # type: () -> int
    '''
    A method to get the length of the data that this Directory Record
    points to.

    Parameters:
     None.
    Returns:
     The length of the data that this Directory Record points to.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
    # Prefer the attached inode's length; fall back to our own field.
    return self.data_length if self.inode is None else self.inode.get_data_length()
constant[
A method to get the length of the data that this Directory Record
points to.
Parameters:
None.
Returns:
The length of the data that this Directory Record points to.
]
if <ast.UnaryOp object at 0x7da1b0fce3b0> begin[:]
<ast.Raise object at 0x7da1b0fce8f0>
if compare[name[self].inode is_not constant[None]] begin[:]
return[call[name[self].inode.get_data_length, parameter[]]]
return[name[self].data_length] | keyword[def] identifier[get_data_length] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
keyword[if] identifier[self] . identifier[inode] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[inode] . identifier[get_data_length] ()
keyword[return] identifier[self] . identifier[data_length] | def get_data_length(self):
# type: () -> int
'\n A method to get the length of the data that this Directory Record\n points to.\n\n Parameters:\n None.\n Returns:\n The length of the data that this Directory Record points to.\n '
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized') # depends on [control=['if'], data=[]]
if self.inode is not None:
return self.inode.get_data_length() # depends on [control=['if'], data=[]]
return self.data_length |
def get_variables(args):
    """
    Return a dictionary of variables specified at CLI.

    :param args: Command Line Arguments namespace; ``args.variables`` is an
        iterable of ``NAME=VALUE`` strings (or falsy when none were given).
    :return: mapping of variable name to value
    :rtype: dict
    """
    variables_dict = {}
    if args.variables:
        for var in args.variables:
            # Split only on the first '=' so values may themselves contain
            # '=' (plain split('=') would truncate them), and entries with
            # no '=' map to an empty string instead of raising IndexError.
            name, _, value = var.partition('=')
            variables_dict[name] = value
    return variables_dict
constant[
Return a dictionary of variables specified at CLI
:param: args: Command Line Arguments namespace
]
variable[variables_dict] assign[=] dictionary[[], []]
if name[args].variables begin[:]
for taget[name[var]] in starred[name[args].variables] begin[:]
variable[words] assign[=] call[name[var].split, parameter[constant[=]]]
call[name[variables_dict]][call[name[words]][constant[0]]] assign[=] call[name[words]][constant[1]]
return[name[variables_dict]] | keyword[def] identifier[get_variables] ( identifier[args] ):
literal[string]
identifier[variables_dict] ={}
keyword[if] identifier[args] . identifier[variables] :
keyword[for] identifier[var] keyword[in] identifier[args] . identifier[variables] :
identifier[words] = identifier[var] . identifier[split] ( literal[string] )
identifier[variables_dict] [ identifier[words] [ literal[int] ]]= identifier[words] [ literal[int] ]
keyword[return] identifier[variables_dict] | def get_variables(args):
"""
Return a dictionary of variables specified at CLI
:param: args: Command Line Arguments namespace
"""
variables_dict = {}
if args.variables:
for var in args.variables:
words = var.split('=')
variables_dict[words[0]] = words[1] # depends on [control=['for'], data=['var']] # depends on [control=['if'], data=[]]
return variables_dict |
def from_json(cls, data):
    """Create a location from a dictionary.

    Args:
        data: dict that may contain any of the keys ``city``, ``state``,
            ``country``, ``latitude``, ``longitude``, ``time_zone``,
            ``elevation``, ``station_id`` and ``source``. Missing keys
            are treated as None.

    Returns:
        A new instance built from the dictionary values.
    """
    keys = ('city', 'state', 'country', 'latitude', 'longitude',
            'time_zone', 'elevation', 'station_id', 'source')
    # dict.get keeps the caller's dictionary unmodified (the previous
    # implementation inserted None entries into it as a side effect).
    return cls(*(data.get(key) for key in keys))
constant[Create a location from a dictionary.
Args:
data: {
"city": "-",
"latitude": 0,
"longitude": 0,
"time_zone": 0,
"elevation": 0}
]
variable[optional_keys] assign[=] tuple[[<ast.Constant object at 0x7da1b12a8520>, <ast.Constant object at 0x7da1b12ab490>, <ast.Constant object at 0x7da1b12a96f0>, <ast.Constant object at 0x7da1b12aa5f0>, <ast.Constant object at 0x7da1b12a9690>, <ast.Constant object at 0x7da1b12a84f0>, <ast.Constant object at 0x7da1b12a8f70>, <ast.Constant object at 0x7da1b12ab850>, <ast.Constant object at 0x7da1b12a8850>]]
for taget[name[key]] in starred[name[optional_keys]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:]
call[name[data]][name[key]] assign[=] constant[None]
return[call[name[cls], parameter[call[name[data]][constant[city]], call[name[data]][constant[state]], call[name[data]][constant[country]], call[name[data]][constant[latitude]], call[name[data]][constant[longitude]], call[name[data]][constant[time_zone]], call[name[data]][constant[elevation]], call[name[data]][constant[station_id]], call[name[data]][constant[source]]]]] | keyword[def] identifier[from_json] ( identifier[cls] , identifier[data] ):
literal[string]
identifier[optional_keys] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] )
keyword[for] identifier[key] keyword[in] identifier[optional_keys] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[data] :
identifier[data] [ identifier[key] ]= keyword[None]
keyword[return] identifier[cls] ( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]) | def from_json(cls, data):
"""Create a location from a dictionary.
Args:
data: {
"city": "-",
"latitude": 0,
"longitude": 0,
"time_zone": 0,
"elevation": 0}
"""
optional_keys = ('city', 'state', 'country', 'latitude', 'longitude', 'time_zone', 'elevation', 'station_id', 'source')
for key in optional_keys:
if key not in data:
data[key] = None # depends on [control=['if'], data=['key', 'data']] # depends on [control=['for'], data=['key']]
return cls(data['city'], data['state'], data['country'], data['latitude'], data['longitude'], data['time_zone'], data['elevation'], data['station_id'], data['source']) |
def create_sas_token(key_name, shared_access_key, scope, expiry=timedelta(hours=1)):
    """Create a SAS token.

    :param key_name: The username/key name/policy name for the token.
    :type key_name: bytes
    :param shared_access_key: The shared access key to generate the token from.
    :type shared_access_key: bytes
    :param scope: The token permissions scope.
    :type scope: bytes
    :param expiry: The lifetime of the generated token. Default is 1 hour.
    :type expiry: ~datetime.timedelta
    :rtype: bytes
    """
    shared_access_key = base64.b64encode(shared_access_key)
    # Bug fix: use total_seconds(), not .seconds. The .seconds attribute only
    # holds the sub-day remainder, so e.g. timedelta(days=1) would contribute 0
    # and produce an already-expired token.
    abs_expiry = int(time.time()) + int(expiry.total_seconds())
    return c_uamqp.create_sas_token(shared_access_key, scope, key_name, abs_expiry)
constant[Create a SAS token.
:param key_name: The username/key name/policy name for the token.
:type key_name: bytes
:param shared_access_key: The shared access key to generate the token from.
:type shared_access_key: bytes
:param scope: The token permissions scope.
:type scope: bytes
:param expiry: The lifetime of the generated token. Default is 1 hour.
:type expiry: ~datetime.timedelta
:rtype: bytes
]
variable[shared_access_key] assign[=] call[name[base64].b64encode, parameter[name[shared_access_key]]]
variable[abs_expiry] assign[=] binary_operation[call[name[int], parameter[call[name[time].time, parameter[]]]] + name[expiry].seconds]
return[call[name[c_uamqp].create_sas_token, parameter[name[shared_access_key], name[scope], name[key_name], name[abs_expiry]]]] | keyword[def] identifier[create_sas_token] ( identifier[key_name] , identifier[shared_access_key] , identifier[scope] , identifier[expiry] = identifier[timedelta] ( identifier[hours] = literal[int] )):
literal[string]
identifier[shared_access_key] = identifier[base64] . identifier[b64encode] ( identifier[shared_access_key] )
identifier[abs_expiry] = identifier[int] ( identifier[time] . identifier[time] ())+ identifier[expiry] . identifier[seconds]
keyword[return] identifier[c_uamqp] . identifier[create_sas_token] ( identifier[shared_access_key] , identifier[scope] , identifier[key_name] , identifier[abs_expiry] ) | def create_sas_token(key_name, shared_access_key, scope, expiry=timedelta(hours=1)):
"""Create a SAS token.
:param key_name: The username/key name/policy name for the token.
:type key_name: bytes
:param shared_access_key: The shared access key to generate the token from.
:type shared_access_key: bytes
:param scope: The token permissions scope.
:type scope: bytes
:param expiry: The lifetime of the generated token. Default is 1 hour.
:type expiry: ~datetime.timedelta
:rtype: bytes
"""
shared_access_key = base64.b64encode(shared_access_key)
abs_expiry = int(time.time()) + expiry.seconds
return c_uamqp.create_sas_token(shared_access_key, scope, key_name, abs_expiry) |
def register(self, metadata):
    """
    Register a distribution on PyPI from the supplied metadata.

    The index is contacted twice with the same field set: first with a
    ``verify`` action (a dry-run validation pass), then with ``submit``
    to perform the actual registration.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the distribution to be
                     registered.
    :return: The HTTP response received from PyPI for the ``submit``
             request.
    """
    self.check_credentials()
    metadata.validate()
    fields = metadata.todict()
    response = None
    # Same payload both times; only the ':action' field differs.
    for action in ('verify', 'submit'):
        fields[':action'] = action
        response = self.send_request(self.encode_request(fields.items(), []))
    return response
constant[
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
]
call[name[self].check_credentials, parameter[]]
call[name[metadata].validate, parameter[]]
variable[d] assign[=] call[name[metadata].todict, parameter[]]
call[name[d]][constant[:action]] assign[=] constant[verify]
variable[request] assign[=] call[name[self].encode_request, parameter[call[name[d].items, parameter[]], list[[]]]]
variable[response] assign[=] call[name[self].send_request, parameter[name[request]]]
call[name[d]][constant[:action]] assign[=] constant[submit]
variable[request] assign[=] call[name[self].encode_request, parameter[call[name[d].items, parameter[]], list[[]]]]
return[call[name[self].send_request, parameter[name[request]]]] | keyword[def] identifier[register] ( identifier[self] , identifier[metadata] ):
literal[string]
identifier[self] . identifier[check_credentials] ()
identifier[metadata] . identifier[validate] ()
identifier[d] = identifier[metadata] . identifier[todict] ()
identifier[d] [ literal[string] ]= literal[string]
identifier[request] = identifier[self] . identifier[encode_request] ( identifier[d] . identifier[items] (),[])
identifier[response] = identifier[self] . identifier[send_request] ( identifier[request] )
identifier[d] [ literal[string] ]= literal[string]
identifier[request] = identifier[self] . identifier[encode_request] ( identifier[d] . identifier[items] (),[])
keyword[return] identifier[self] . identifier[send_request] ( identifier[request] ) | def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request) |
def get(
        self, app_id, metric_id, timespan=None, interval=None, aggregation=None, segment=None, top=None, orderby=None, filter=None, custom_headers=None, raw=False, **operation_config):
    """Retrieve metric data.
    Gets metric values for a single metric.
    :param app_id: ID of the application. This is Application ID from the
     API Access settings blade in the Azure portal.
    :type app_id: str
    :param metric_id: ID of the metric. This is either a standard AI
     metric, or an application-specific custom metric. Possible values
     include: 'requests/count', 'requests/duration', 'requests/failed',
     'users/count', 'users/authenticated', 'pageViews/count',
     'pageViews/duration', 'client/processingDuration',
     'client/receiveDuration', 'client/networkDuration',
     'client/sendDuration', 'client/totalDuration', 'dependencies/count',
     'dependencies/failed', 'dependencies/duration', 'exceptions/count',
     'exceptions/browser', 'exceptions/server', 'sessions/count',
     'performanceCounters/requestExecutionTime',
     'performanceCounters/requestsPerSecond',
     'performanceCounters/requestsInQueue',
     'performanceCounters/memoryAvailableBytes',
     'performanceCounters/exceptionsPerSecond',
     'performanceCounters/processCpuPercentage',
     'performanceCounters/processIOBytesPerSecond',
     'performanceCounters/processPrivateBytes',
     'performanceCounters/processorCpuPercentage',
     'availabilityResults/availabilityPercentage',
     'availabilityResults/duration', 'billing/telemetryCount',
     'customEvents/count'
    :type metric_id: str or ~azure.applicationinsights.models.MetricId
    :param timespan: The timespan over which to retrieve metric values.
     This is an ISO8601 time period value. If timespan is omitted, a
     default time range of `PT12H` ("last 12 hours") is used. The actual
     timespan that is queried may be adjusted by the server based. In all
     cases, the actual time span used for the query is included in the
     response.
    :type timespan: str
    :param interval: The time interval to use when retrieving metric
     values. This is an ISO8601 duration. If interval is omitted, the
     metric value is aggregated across the entire timespan. If interval is
     supplied, the server may adjust the interval to a more appropriate
     size based on the timespan used for the query. In all cases, the
     actual interval used for the query is included in the response.
    :type interval: timedelta
    :param aggregation: The aggregation to use when computing the metric
     values. To retrieve more than one aggregation at a time, separate them
     with a comma. If no aggregation is specified, then the default
     aggregation for the metric is used.
    :type aggregation: list[str or
     ~azure.applicationinsights.models.MetricsAggregation]
    :param segment: The name of the dimension to segment the metric values
     by. This dimension must be applicable to the metric you are
     retrieving. To segment by more than one dimension at a time, separate
     them with a comma (,). In this case, the metric data will be segmented
     in the order the dimensions are listed in the parameter.
    :type segment: list[str or
     ~azure.applicationinsights.models.MetricsSegment]
    :param top: The number of segments to return.  This value is only
     valid when segment is specified.
    :type top: int
    :param orderby: The aggregation function and direction to sort the
     segments by.  This value is only valid when segment is specified.
    :type orderby: str
    :param filter: An expression used to filter the results.  This value
     should be a valid OData filter expression where the keys of each
     clause should be applicable dimensions for the metric you are
     retrieving.
    :type filter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: MetricsResult or ClientRawResponse if raw=true
    :rtype: ~azure.applicationinsights.models.MetricsResult or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.applicationinsights.models.ErrorResponseException>`
    """
    # Construct URL
    # The URL template is attached to this method as metadata by the
    # code generator; fill in the path placeholders via the serializer.
    url = self.get.metadata['url']
    path_format_arguments = {
        'appId': self._serialize.url("app_id", app_id, 'str'),
        'metricId': self._serialize.url("metric_id", metric_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    # Only optional parameters the caller actually supplied are added to
    # the query string; list-valued parameters are joined with commas.
    query_parameters = {}
    if timespan is not None:
        query_parameters['timespan'] = self._serialize.query("timespan", timespan, 'str')
    if interval is not None:
        query_parameters['interval'] = self._serialize.query("interval", interval, 'duration')
    if aggregation is not None:
        query_parameters['aggregation'] = self._serialize.query("aggregation", aggregation, '[MetricsAggregation]', div=',', min_items=1)
    if segment is not None:
        query_parameters['segment'] = self._serialize.query("segment", segment, '[str]', div=',', min_items=1)
    if top is not None:
        query_parameters['top'] = self._serialize.query("top", top, 'int')
    if orderby is not None:
        query_parameters['orderby'] = self._serialize.query("orderby", orderby, 'str')
    if filter is not None:
        query_parameters['filter'] = self._serialize.query("filter", filter, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # 200 is the only documented success status for this operation; any
    # other status is surfaced as the service's error contract.
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('MetricsResult', response)

    # raw=True: return the deserialized model wrapped together with the
    # underlying transport response (headers, status code, etc.).
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
constant[Retrieve metric data.
Gets metric values for a single metric.
:param app_id: ID of the application. This is Application ID from the
API Access settings blade in the Azure portal.
:type app_id: str
:param metric_id: ID of the metric. This is either a standard AI
metric, or an application-specific custom metric. Possible values
include: 'requests/count', 'requests/duration', 'requests/failed',
'users/count', 'users/authenticated', 'pageViews/count',
'pageViews/duration', 'client/processingDuration',
'client/receiveDuration', 'client/networkDuration',
'client/sendDuration', 'client/totalDuration', 'dependencies/count',
'dependencies/failed', 'dependencies/duration', 'exceptions/count',
'exceptions/browser', 'exceptions/server', 'sessions/count',
'performanceCounters/requestExecutionTime',
'performanceCounters/requestsPerSecond',
'performanceCounters/requestsInQueue',
'performanceCounters/memoryAvailableBytes',
'performanceCounters/exceptionsPerSecond',
'performanceCounters/processCpuPercentage',
'performanceCounters/processIOBytesPerSecond',
'performanceCounters/processPrivateBytes',
'performanceCounters/processorCpuPercentage',
'availabilityResults/availabilityPercentage',
'availabilityResults/duration', 'billing/telemetryCount',
'customEvents/count'
:type metric_id: str or ~azure.applicationinsights.models.MetricId
:param timespan: The timespan over which to retrieve metric values.
This is an ISO8601 time period value. If timespan is omitted, a
default time range of `PT12H` ("last 12 hours") is used. The actual
timespan that is queried may be adjusted by the server based. In all
cases, the actual time span used for the query is included in the
response.
:type timespan: str
:param interval: The time interval to use when retrieving metric
values. This is an ISO8601 duration. If interval is omitted, the
metric value is aggregated across the entire timespan. If interval is
supplied, the server may adjust the interval to a more appropriate
size based on the timespan used for the query. In all cases, the
actual interval used for the query is included in the response.
:type interval: timedelta
:param aggregation: The aggregation to use when computing the metric
values. To retrieve more than one aggregation at a time, separate them
with a comma. If no aggregation is specified, then the default
aggregation for the metric is used.
:type aggregation: list[str or
~azure.applicationinsights.models.MetricsAggregation]
:param segment: The name of the dimension to segment the metric values
by. This dimension must be applicable to the metric you are
retrieving. To segment by more than one dimension at a time, separate
them with a comma (,). In this case, the metric data will be segmented
in the order the dimensions are listed in the parameter.
:type segment: list[str or
~azure.applicationinsights.models.MetricsSegment]
:param top: The number of segments to return. This value is only
valid when segment is specified.
:type top: int
:param orderby: The aggregation function and direction to sort the
segments by. This value is only valid when segment is specified.
:type orderby: str
:param filter: An expression used to filter the results. This value
should be a valid OData filter expression where the keys of each
clause should be applicable dimensions for the metric you are
retrieving.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: MetricsResult or ClientRawResponse if raw=true
:rtype: ~azure.applicationinsights.models.MetricsResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.applicationinsights.models.ErrorResponseException>`
]
variable[url] assign[=] call[name[self].get.metadata][constant[url]]
variable[path_format_arguments] assign[=] dictionary[[<ast.Constant object at 0x7da2043442b0>, <ast.Constant object at 0x7da204345450>], [<ast.Call object at 0x7da2043454b0>, <ast.Call object at 0x7da204344cd0>]]
variable[url] assign[=] call[name[self]._client.format_url, parameter[name[url]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[timespan] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[timespan]] assign[=] call[name[self]._serialize.query, parameter[constant[timespan], name[timespan], constant[str]]]
if compare[name[interval] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[interval]] assign[=] call[name[self]._serialize.query, parameter[constant[interval], name[interval], constant[duration]]]
if compare[name[aggregation] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[aggregation]] assign[=] call[name[self]._serialize.query, parameter[constant[aggregation], name[aggregation], constant[[MetricsAggregation]]]]
if compare[name[segment] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[segment]] assign[=] call[name[self]._serialize.query, parameter[constant[segment], name[segment], constant[[str]]]]
if compare[name[top] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[top]] assign[=] call[name[self]._serialize.query, parameter[constant[top], name[top], constant[int]]]
if compare[name[orderby] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[orderby]] assign[=] call[name[self]._serialize.query, parameter[constant[orderby], name[orderby], constant[str]]]
if compare[name[filter] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[filter]] assign[=] call[name[self]._serialize.query, parameter[constant[filter], name[filter], constant[str]]]
variable[header_parameters] assign[=] dictionary[[], []]
call[name[header_parameters]][constant[Accept]] assign[=] constant[application/json]
if name[custom_headers] begin[:]
call[name[header_parameters].update, parameter[name[custom_headers]]]
variable[request] assign[=] call[name[self]._client.get, parameter[name[url], name[query_parameters], name[header_parameters]]]
variable[response] assign[=] call[name[self]._client.send, parameter[name[request]]]
if compare[name[response].status_code <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da2054a58d0>]]] begin[:]
<ast.Raise object at 0x7da2054a7f40>
variable[deserialized] assign[=] constant[None]
if compare[name[response].status_code equal[==] constant[200]] begin[:]
variable[deserialized] assign[=] call[name[self]._deserialize, parameter[constant[MetricsResult], name[response]]]
if name[raw] begin[:]
variable[client_raw_response] assign[=] call[name[ClientRawResponse], parameter[name[deserialized], name[response]]]
return[name[client_raw_response]]
return[name[deserialized]] | keyword[def] identifier[get] (
identifier[self] , identifier[app_id] , identifier[metric_id] , identifier[timespan] = keyword[None] , identifier[interval] = keyword[None] , identifier[aggregation] = keyword[None] , identifier[segment] = keyword[None] , identifier[top] = keyword[None] , identifier[orderby] = keyword[None] , identifier[filter] = keyword[None] , identifier[custom_headers] = keyword[None] , identifier[raw] = keyword[False] ,** identifier[operation_config] ):
literal[string]
identifier[url] = identifier[self] . identifier[get] . identifier[metadata] [ literal[string] ]
identifier[path_format_arguments] ={
literal[string] : identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[app_id] , literal[string] ),
literal[string] : identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[metric_id] , literal[string] )
}
identifier[url] = identifier[self] . identifier[_client] . identifier[format_url] ( identifier[url] ,** identifier[path_format_arguments] )
identifier[query_parameters] ={}
keyword[if] identifier[timespan] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[timespan] , literal[string] )
keyword[if] identifier[interval] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[interval] , literal[string] )
keyword[if] identifier[aggregation] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[aggregation] , literal[string] , identifier[div] = literal[string] , identifier[min_items] = literal[int] )
keyword[if] identifier[segment] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[segment] , literal[string] , identifier[div] = literal[string] , identifier[min_items] = literal[int] )
keyword[if] identifier[top] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[top] , literal[string] )
keyword[if] identifier[orderby] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[orderby] , literal[string] )
keyword[if] identifier[filter] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[filter] , literal[string] )
identifier[header_parameters] ={}
identifier[header_parameters] [ literal[string] ]= literal[string]
keyword[if] identifier[custom_headers] :
identifier[header_parameters] . identifier[update] ( identifier[custom_headers] )
identifier[request] = identifier[self] . identifier[_client] . identifier[get] ( identifier[url] , identifier[query_parameters] , identifier[header_parameters] )
identifier[response] = identifier[self] . identifier[_client] . identifier[send] ( identifier[request] , identifier[stream] = keyword[False] ,** identifier[operation_config] )
keyword[if] identifier[response] . identifier[status_code] keyword[not] keyword[in] [ literal[int] ]:
keyword[raise] identifier[models] . identifier[ErrorResponseException] ( identifier[self] . identifier[_deserialize] , identifier[response] )
identifier[deserialized] = keyword[None]
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
identifier[deserialized] = identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] )
keyword[if] identifier[raw] :
identifier[client_raw_response] = identifier[ClientRawResponse] ( identifier[deserialized] , identifier[response] )
keyword[return] identifier[client_raw_response]
keyword[return] identifier[deserialized] | def get(self, app_id, metric_id, timespan=None, interval=None, aggregation=None, segment=None, top=None, orderby=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Retrieve metric data.
Gets metric values for a single metric.
:param app_id: ID of the application. This is Application ID from the
API Access settings blade in the Azure portal.
:type app_id: str
:param metric_id: ID of the metric. This is either a standard AI
metric, or an application-specific custom metric. Possible values
include: 'requests/count', 'requests/duration', 'requests/failed',
'users/count', 'users/authenticated', 'pageViews/count',
'pageViews/duration', 'client/processingDuration',
'client/receiveDuration', 'client/networkDuration',
'client/sendDuration', 'client/totalDuration', 'dependencies/count',
'dependencies/failed', 'dependencies/duration', 'exceptions/count',
'exceptions/browser', 'exceptions/server', 'sessions/count',
'performanceCounters/requestExecutionTime',
'performanceCounters/requestsPerSecond',
'performanceCounters/requestsInQueue',
'performanceCounters/memoryAvailableBytes',
'performanceCounters/exceptionsPerSecond',
'performanceCounters/processCpuPercentage',
'performanceCounters/processIOBytesPerSecond',
'performanceCounters/processPrivateBytes',
'performanceCounters/processorCpuPercentage',
'availabilityResults/availabilityPercentage',
'availabilityResults/duration', 'billing/telemetryCount',
'customEvents/count'
:type metric_id: str or ~azure.applicationinsights.models.MetricId
:param timespan: The timespan over which to retrieve metric values.
This is an ISO8601 time period value. If timespan is omitted, a
default time range of `PT12H` ("last 12 hours") is used. The actual
timespan that is queried may be adjusted by the server based. In all
cases, the actual time span used for the query is included in the
response.
:type timespan: str
:param interval: The time interval to use when retrieving metric
values. This is an ISO8601 duration. If interval is omitted, the
metric value is aggregated across the entire timespan. If interval is
supplied, the server may adjust the interval to a more appropriate
size based on the timespan used for the query. In all cases, the
actual interval used for the query is included in the response.
:type interval: timedelta
:param aggregation: The aggregation to use when computing the metric
values. To retrieve more than one aggregation at a time, separate them
with a comma. If no aggregation is specified, then the default
aggregation for the metric is used.
:type aggregation: list[str or
~azure.applicationinsights.models.MetricsAggregation]
:param segment: The name of the dimension to segment the metric values
by. This dimension must be applicable to the metric you are
retrieving. To segment by more than one dimension at a time, separate
them with a comma (,). In this case, the metric data will be segmented
in the order the dimensions are listed in the parameter.
:type segment: list[str or
~azure.applicationinsights.models.MetricsSegment]
:param top: The number of segments to return. This value is only
valid when segment is specified.
:type top: int
:param orderby: The aggregation function and direction to sort the
segments by. This value is only valid when segment is specified.
:type orderby: str
:param filter: An expression used to filter the results. This value
should be a valid OData filter expression where the keys of each
clause should be applicable dimensions for the metric you are
retrieving.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: MetricsResult or ClientRawResponse if raw=true
:rtype: ~azure.applicationinsights.models.MetricsResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.applicationinsights.models.ErrorResponseException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {'appId': self._serialize.url('app_id', app_id, 'str'), 'metricId': self._serialize.url('metric_id', metric_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timespan is not None:
query_parameters['timespan'] = self._serialize.query('timespan', timespan, 'str') # depends on [control=['if'], data=['timespan']]
if interval is not None:
query_parameters['interval'] = self._serialize.query('interval', interval, 'duration') # depends on [control=['if'], data=['interval']]
if aggregation is not None:
query_parameters['aggregation'] = self._serialize.query('aggregation', aggregation, '[MetricsAggregation]', div=',', min_items=1) # depends on [control=['if'], data=['aggregation']]
if segment is not None:
query_parameters['segment'] = self._serialize.query('segment', segment, '[str]', div=',', min_items=1) # depends on [control=['if'], data=['segment']]
if top is not None:
query_parameters['top'] = self._serialize.query('top', top, 'int') # depends on [control=['if'], data=['top']]
if orderby is not None:
query_parameters['orderby'] = self._serialize.query('orderby', orderby, 'str') # depends on [control=['if'], data=['orderby']]
if filter is not None:
query_parameters['filter'] = self._serialize.query('filter', filter, 'str') # depends on [control=['if'], data=['filter']]
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers) # depends on [control=['if'], data=[]]
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response) # depends on [control=['if'], data=[]]
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('MetricsResult', response) # depends on [control=['if'], data=[]]
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response # depends on [control=['if'], data=[]]
return deserialized |
def cuts_outside(self):
    '''Report whether the enzyme cuts outside its recognition site.
    Cutting at the very end of the site returns True.

    :returns: Whether the enzyme will cut outside its recognition site.
    :rtype: bool

    '''
    # A cut index is considered inside the site while it falls within
    # [0, len(site) + 1]; anything beyond either bound is outside.
    upper_bound = len(self.recognition_site) + 1
    return any(position < 0 or position > upper_bound
               for position in self.cut_site)
constant[Report whether the enzyme cuts outside its recognition site.
Cutting at the very end of the site returns True.
:returns: Whether the enzyme will cut outside its recognition site.
:rtype: bool
]
for taget[name[index]] in starred[name[self].cut_site] begin[:]
if <ast.BoolOp object at 0x7da20c76cac0> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[cuts_outside] ( identifier[self] ):
literal[string]
keyword[for] identifier[index] keyword[in] identifier[self] . identifier[cut_site] :
keyword[if] identifier[index] < literal[int] keyword[or] identifier[index] > identifier[len] ( identifier[self] . identifier[recognition_site] )+ literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def cuts_outside(self):
"""Report whether the enzyme cuts outside its recognition site.
Cutting at the very end of the site returns True.
:returns: Whether the enzyme will cut outside its recognition site.
:rtype: bool
"""
for index in self.cut_site:
if index < 0 or index > len(self.recognition_site) + 1:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
return False |
def present(name, value, delimiter=DEFAULT_TARGET_DELIM, force=False):
    '''
    Ensure that a grain is set

    name
        The grain name

    value
        The value to set on the grain

    force
        If force is True, the existing grain will be overwritten
        regardless of its existing or provided value type. Defaults to False

    delimiter
        A delimiter different from the default can be provided.

    This state can set a grain to a complex value (lists and dicts) and
    supports nested grains. For a nested grain the intermediate keys are
    created when missing. An existing grain is only overwritten when its
    current or requested value is complex if ``force: True`` is given.

    .. code-block:: yaml

        cheese:
          grains.present:
            - value: edam
    '''
    # Normalise any custom delimiter in the grain path to the default one.
    name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': '',
    }
    # Unique sentinel so a stored value of None is distinguishable from
    # "grain does not exist".
    _missing = object()
    current = __salt__['grains.get'](name, _missing)
    if current == value:
        ret['comment'] = 'Grain is already set'
        return ret
    if __opts__['test']:
        # Dry run: report what would happen without touching the grains.
        ret['result'] = None
        if current is _missing:
            ret['comment'] = 'Grain {0} is set to be added'.format(name)
            ret['changes'] = {'new': name}
        else:
            ret['comment'] = 'Grain {0} is set to be changed'.format(name)
            ret['changes'] = {'changed': {name: value}}
        return ret
    # Delegate the actual write to the execution module and adopt its result.
    ret = __salt__['grains.set'](name, value, force=force)
    if ret['result'] is True and ret['changes'] != {}:
        ret['comment'] = 'Set grain {0} to {1}'.format(name, value)
    ret['name'] = name
    return ret
constant[
Ensure that a grain is set
.. versionchanged:: v2015.8.2
name
The grain name
value
The value to set on the grain
force
If force is True, the existing grain will be overwritten
regardless of its existing or provided value type. Defaults to False
.. versionadded:: v2015.8.2
delimiter
A delimiter different from the default can be provided.
.. versionadded:: v2015.8.2
It is now capable to set a grain to a complex value (ie. lists and dicts)
and supports nested grains as well.
If the grain does not yet exist, a new grain is set to the given value. For
a nested grain, the necessary keys are created if they don't exist. If
a given key is an existing value, it will be converted, but an existing value
different from the given key will fail the state.
If the grain with the given name exists, its value is updated to the new
value unless its existing or provided value is complex (list or dict). Use
`force: True` to overwrite.
.. code-block:: yaml
cheese:
grains.present:
- value: edam
nested_grain_with_complex_value:
grains.present:
- name: icinga:Apache SSL
- value:
- command: check_https
- params: -H localhost -p 443 -S
with,a,custom,delimiter:
grains.present:
- value: yay
- delimiter: ','
]
variable[name] assign[=] call[name[re].sub, parameter[name[delimiter], name[DEFAULT_TARGET_DELIM], name[name]]]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b20008e0>, <ast.Constant object at 0x7da1b2001cf0>, <ast.Constant object at 0x7da1b2001000>, <ast.Constant object at 0x7da1b20010c0>], [<ast.Name object at 0x7da1b2001e10>, <ast.Dict object at 0x7da1b2003430>, <ast.Constant object at 0x7da1b2000340>, <ast.Constant object at 0x7da1b2001de0>]]
variable[_non_existent] assign[=] call[name[object], parameter[]]
variable[existing] assign[=] call[call[name[__salt__]][constant[grains.get]], parameter[name[name], name[_non_existent]]]
if compare[name[existing] equal[==] name[value]] begin[:]
call[name[ret]][constant[comment]] assign[=] constant[Grain is already set]
return[name[ret]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
if compare[name[existing] is name[_non_existent]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Grain {0} is set to be added].format, parameter[name[name]]]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da1b210a260>], [<ast.Name object at 0x7da1b210ace0>]]
return[name[ret]]
variable[ret] assign[=] call[call[name[__salt__]][constant[grains.set]], parameter[name[name], name[value]]]
if <ast.BoolOp object at 0x7da1b2108a90> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Set grain {0} to {1}].format, parameter[name[name], name[value]]]
call[name[ret]][constant[name]] assign[=] name[name]
return[name[ret]] | keyword[def] identifier[present] ( identifier[name] , identifier[value] , identifier[delimiter] = identifier[DEFAULT_TARGET_DELIM] , identifier[force] = keyword[False] ):
literal[string]
identifier[name] = identifier[re] . identifier[sub] ( identifier[delimiter] , identifier[DEFAULT_TARGET_DELIM] , identifier[name] )
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] }
identifier[_non_existent] = identifier[object] ()
identifier[existing] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[_non_existent] )
keyword[if] identifier[existing] == identifier[value] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
keyword[if] identifier[existing] keyword[is] identifier[_non_existent] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]={ literal[string] : identifier[name] }
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]={ literal[string] :{ identifier[name] : identifier[value] }}
keyword[return] identifier[ret]
identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[value] , identifier[force] = identifier[force] )
keyword[if] identifier[ret] [ literal[string] ] keyword[is] keyword[True] keyword[and] identifier[ret] [ literal[string] ]!={}:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] , identifier[value] )
identifier[ret] [ literal[string] ]= identifier[name]
keyword[return] identifier[ret] | def present(name, value, delimiter=DEFAULT_TARGET_DELIM, force=False):
"""
Ensure that a grain is set
.. versionchanged:: v2015.8.2
name
The grain name
value
The value to set on the grain
force
If force is True, the existing grain will be overwritten
regardless of its existing or provided value type. Defaults to False
.. versionadded:: v2015.8.2
delimiter
A delimiter different from the default can be provided.
.. versionadded:: v2015.8.2
It is now capable to set a grain to a complex value (ie. lists and dicts)
and supports nested grains as well.
If the grain does not yet exist, a new grain is set to the given value. For
a nested grain, the necessary keys are created if they don't exist. If
a given key is an existing value, it will be converted, but an existing value
different from the given key will fail the state.
If the grain with the given name exists, its value is updated to the new
value unless its existing or provided value is complex (list or dict). Use
`force: True` to overwrite.
.. code-block:: yaml
cheese:
grains.present:
- value: edam
nested_grain_with_complex_value:
grains.present:
- name: icinga:Apache SSL
- value:
- command: check_https
- params: -H localhost -p 443 -S
with,a,custom,delimiter:
grains.present:
- value: yay
- delimiter: ','
"""
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
_non_existent = object()
existing = __salt__['grains.get'](name, _non_existent)
if existing == value:
ret['comment'] = 'Grain is already set'
return ret # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['result'] = None
if existing is _non_existent:
ret['comment'] = 'Grain {0} is set to be added'.format(name)
ret['changes'] = {'new': name} # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Grain {0} is set to be changed'.format(name)
ret['changes'] = {'changed': {name: value}}
return ret # depends on [control=['if'], data=[]]
ret = __salt__['grains.set'](name, value, force=force)
if ret['result'] is True and ret['changes'] != {}:
ret['comment'] = 'Set grain {0} to {1}'.format(name, value) # depends on [control=['if'], data=[]]
ret['name'] = name
return ret |
def search(cls, session, queries):
    """Search for a conversation given a domain.

    Args:
        session (requests.sessions.Session): Authenticated session.
        queries (helpscout.models.Domain or iter): The queries for the
            domain. If a ``Domain`` object is provided, it will simply be
            returned. Otherwise, a ``Domain`` object will be generated
            from the complex queries. In this case, the queries should
            conform to the interface in
            :func:`helpscout.domain.Domain.from_tuple`.

    Returns:
        RequestPaginator(output_type=helpscout.models.SearchConversation):
            SearchConversation iterator.
    """
    # Delegate to the shared search implementation, pinning the output
    # model to SearchConversation.
    parent = super(Conversations, cls)
    return parent.search(session, queries, SearchConversation)
constant[Search for a conversation given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator.
]
return[call[call[name[super], parameter[name[Conversations], name[cls]]].search, parameter[name[session], name[queries], name[SearchConversation]]]] | keyword[def] identifier[search] ( identifier[cls] , identifier[session] , identifier[queries] ):
literal[string]
keyword[return] identifier[super] ( identifier[Conversations] , identifier[cls] ). identifier[search] (
identifier[session] , identifier[queries] , identifier[SearchConversation] ,
) | def search(cls, session, queries):
"""Search for a conversation given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator.
"""
return super(Conversations, cls).search(session, queries, SearchConversation) |
def HMAC(self, key, str_):
    """Return the raw digest of HMAC(key, str) using this object's hash factory."""
    # pylint: disable=C0103
    mac = hmac.new(key, str_, digestmod=self.hash_factory)
    return mac.digest()
constant[The HMAC(key, str) function.]
return[call[call[name[hmac].new, parameter[name[key], name[str_], name[self].hash_factory]].digest, parameter[]]] | keyword[def] identifier[HMAC] ( identifier[self] , identifier[key] , identifier[str_] ):
literal[string]
keyword[return] identifier[hmac] . identifier[new] ( identifier[key] , identifier[str_] , identifier[self] . identifier[hash_factory] ). identifier[digest] () | def HMAC(self, key, str_):
"""The HMAC(key, str) function."""
# pylint: disable=C0103
return hmac.new(key, str_, self.hash_factory).digest() |
def convert_new_publication_info_to_old(publication_infos):
    """Convert back a ``publication_info`` value from the new format to the old.

    Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
    to be used whenever we are sending back records from Labs to Legacy.

    Args:
        publication_infos: a ``publication_info`` in the new format.

    Returns:
        list(dict): a ``publication_info`` in the old format.
    """
    def _needs_a_hidden_pubnote(journal_title, journal_volume):
        # True when this journal/volume pair also requires a hidden pubnote
        # in the old format (lookup table defined at module level).
        return (
            journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and
            journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]
        )
    result = []
    for publication_info in publication_infos:
        # Work on a deep copy so the caller's data is never mutated.
        _publication_info = copy.deepcopy(publication_info)
        journal_title = _publication_info.get('journal_title')
        try:
            # Case 1: renamed journal -- map the title back and we are done
            # with this entry.
            journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]
            _publication_info['journal_title'] = journal_title
            result.append(_publication_info)
            continue
        except KeyError:
            # Not a renamed journal (or no title at all); fall through.
            pass
        journal_volume = _publication_info.get('journal_volume')
        year = _publication_info.get('year')
        # Case 2: journals whose old-style two-digit volume gets the last two
        # digits of the year prepended (e.g. year 2001, volume "23" -> "0123").
        if (journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and
                journal_volume and len(journal_volume) == 2):
            two_digit_year = str(year)[2:]
            _publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])
            result.append(_publication_info)
            continue
        if journal_title and journal_volume:
            # Case 3: titles matching the "ends with a letter" pattern move
            # that letter back onto the volume in the old format.
            match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)
            if match and _needs_a_hidden_pubnote(journal_title, journal_volume):
                # Visible pubnote: letter appended to the volume.
                _publication_info['journal_title'] = match.group('title')
                _publication_info['journal_volume'] = journal_volume + match.group('letter')
                result.append(_publication_info)
                # Hidden duplicate with the letter prepended instead; this one
                # is appended by the final ``result.append`` below.
                _publication_info = copy.deepcopy(publication_info)
                _publication_info['hidden'] = True
                _publication_info['journal_title'] = match.group('title')
                _publication_info['journal_volume'] = match.group('letter') + journal_volume
            elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER:
                # No hidden note needed: just prepend the letter to the volume.
                _publication_info['journal_title'] = match.group('title')
                _publication_info['journal_volume'] = match.group('letter') + journal_volume
        result.append(_publication_info)
    return result
constant[Convert back a ``publication_info`` value from the new format to the old.
Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
to be used whenever we are sending back records from Labs to Legacy.
Args:
publication_infos: a ``publication_info`` in the new format.
Returns:
list(dict): a ``publication_info`` in the old format.
]
def function[_needs_a_hidden_pubnote, parameter[journal_title, journal_volume]]:
return[<ast.BoolOp object at 0x7da1b242fa00>]
variable[result] assign[=] list[[]]
for taget[name[publication_info]] in starred[name[publication_infos]] begin[:]
variable[_publication_info] assign[=] call[name[copy].deepcopy, parameter[name[publication_info]]]
variable[journal_title] assign[=] call[name[_publication_info].get, parameter[constant[journal_title]]]
<ast.Try object at 0x7da1b242de40>
variable[journal_volume] assign[=] call[name[_publication_info].get, parameter[constant[journal_volume]]]
variable[year] assign[=] call[name[_publication_info].get, parameter[constant[year]]]
if <ast.BoolOp object at 0x7da1b24868c0> begin[:]
variable[two_digit_year] assign[=] call[call[name[str], parameter[name[year]]]][<ast.Slice object at 0x7da1b24fdba0>]
call[name[_publication_info]][constant[journal_volume]] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da1b25d2ce0>, <ast.Name object at 0x7da1b25d0c40>]]]]
call[name[result].append, parameter[name[_publication_info]]]
continue
if <ast.BoolOp object at 0x7da1b25d01f0> begin[:]
variable[match] assign[=] call[name[_RE_TITLE_ENDS_WITH_A_LETTER].match, parameter[name[journal_title]]]
if <ast.BoolOp object at 0x7da1b25d0df0> begin[:]
call[name[_publication_info]][constant[journal_title]] assign[=] call[name[match].group, parameter[constant[title]]]
call[name[_publication_info]][constant[journal_volume]] assign[=] binary_operation[name[journal_volume] + call[name[match].group, parameter[constant[letter]]]]
call[name[result].append, parameter[name[_publication_info]]]
variable[_publication_info] assign[=] call[name[copy].deepcopy, parameter[name[publication_info]]]
call[name[_publication_info]][constant[hidden]] assign[=] constant[True]
call[name[_publication_info]][constant[journal_title]] assign[=] call[name[match].group, parameter[constant[title]]]
call[name[_publication_info]][constant[journal_volume]] assign[=] binary_operation[call[name[match].group, parameter[constant[letter]]] + name[journal_volume]]
call[name[result].append, parameter[name[_publication_info]]]
return[name[result]] | keyword[def] identifier[convert_new_publication_info_to_old] ( identifier[publication_infos] ):
literal[string]
keyword[def] identifier[_needs_a_hidden_pubnote] ( identifier[journal_title] , identifier[journal_volume] ):
keyword[return] (
identifier[journal_title] keyword[in] identifier[_JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE] keyword[and]
identifier[journal_volume] keyword[in] identifier[_JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE] [ identifier[journal_title] ]
)
identifier[result] =[]
keyword[for] identifier[publication_info] keyword[in] identifier[publication_infos] :
identifier[_publication_info] = identifier[copy] . identifier[deepcopy] ( identifier[publication_info] )
identifier[journal_title] = identifier[_publication_info] . identifier[get] ( literal[string] )
keyword[try] :
identifier[journal_title] = identifier[_JOURNALS_RENAMED_NEW_TO_OLD] [ identifier[journal_title] ]
identifier[_publication_info] [ literal[string] ]= identifier[journal_title]
identifier[result] . identifier[append] ( identifier[_publication_info] )
keyword[continue]
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[journal_volume] = identifier[_publication_info] . identifier[get] ( literal[string] )
identifier[year] = identifier[_publication_info] . identifier[get] ( literal[string] )
keyword[if] ( identifier[journal_title] keyword[in] identifier[_JOURNALS_WITH_YEAR_ADDED_TO_VOLUME] keyword[and] identifier[year] keyword[and]
identifier[journal_volume] keyword[and] identifier[len] ( identifier[journal_volume] )== literal[int] ):
identifier[two_digit_year] = identifier[str] ( identifier[year] )[ literal[int] :]
identifier[_publication_info] [ literal[string] ]= literal[string] . identifier[join] ([ identifier[two_digit_year] , identifier[journal_volume] ])
identifier[result] . identifier[append] ( identifier[_publication_info] )
keyword[continue]
keyword[if] identifier[journal_title] keyword[and] identifier[journal_volume] :
identifier[match] = identifier[_RE_TITLE_ENDS_WITH_A_LETTER] . identifier[match] ( identifier[journal_title] )
keyword[if] identifier[match] keyword[and] identifier[_needs_a_hidden_pubnote] ( identifier[journal_title] , identifier[journal_volume] ):
identifier[_publication_info] [ literal[string] ]= identifier[match] . identifier[group] ( literal[string] )
identifier[_publication_info] [ literal[string] ]= identifier[journal_volume] + identifier[match] . identifier[group] ( literal[string] )
identifier[result] . identifier[append] ( identifier[_publication_info] )
identifier[_publication_info] = identifier[copy] . identifier[deepcopy] ( identifier[publication_info] )
identifier[_publication_info] [ literal[string] ]= keyword[True]
identifier[_publication_info] [ literal[string] ]= identifier[match] . identifier[group] ( literal[string] )
identifier[_publication_info] [ literal[string] ]= identifier[match] . identifier[group] ( literal[string] )+ identifier[journal_volume]
keyword[elif] identifier[match] keyword[and] identifier[journal_title] keyword[not] keyword[in] identifier[_JOURNALS_ALREADY_ENDING_WITH_A_LETTER] :
identifier[_publication_info] [ literal[string] ]= identifier[match] . identifier[group] ( literal[string] )
identifier[_publication_info] [ literal[string] ]= identifier[match] . identifier[group] ( literal[string] )+ identifier[journal_volume]
identifier[result] . identifier[append] ( identifier[_publication_info] )
keyword[return] identifier[result] | def convert_new_publication_info_to_old(publication_infos):
"""Convert back a ``publication_info`` value from the new format to the old.
Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
to be used whenever we are sending back records from Labs to Legacy.
Args:
publication_infos: a ``publication_info`` in the new format.
Returns:
list(dict): a ``publication_info`` in the old format.
"""
def _needs_a_hidden_pubnote(journal_title, journal_volume):
return journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]
result = []
for publication_info in publication_infos:
_publication_info = copy.deepcopy(publication_info)
journal_title = _publication_info.get('journal_title')
try:
journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]
_publication_info['journal_title'] = journal_title
result.append(_publication_info)
continue # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
journal_volume = _publication_info.get('journal_volume')
year = _publication_info.get('year')
if journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and journal_volume and (len(journal_volume) == 2):
two_digit_year = str(year)[2:]
_publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])
result.append(_publication_info)
continue # depends on [control=['if'], data=[]]
if journal_title and journal_volume:
match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)
if match and _needs_a_hidden_pubnote(journal_title, journal_volume):
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = journal_volume + match.group('letter')
result.append(_publication_info)
_publication_info = copy.deepcopy(publication_info)
_publication_info['hidden'] = True
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume # depends on [control=['if'], data=[]]
elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER:
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
result.append(_publication_info) # depends on [control=['for'], data=['publication_info']]
return result |
def set_ext_param(self, ext_key, param_key, val):
    '''
    Set the provided parameter in a set of extension parameters.
    '''
    params = self.extensions[ext_key]
    if not params:
        # Lazily create the parameter mapping; unset keys read back as None.
        params = defaultdict(lambda: None)
        self.extensions[ext_key] = params
    params[param_key] = val
constant[
Set the provided parameter in a set of extension parameters.
]
if <ast.UnaryOp object at 0x7da1b1b36bf0> begin[:]
call[name[self].extensions][name[ext_key]] assign[=] call[name[defaultdict], parameter[<ast.Lambda object at 0x7da1b1b14a30>]]
call[call[name[self].extensions][name[ext_key]]][name[param_key]] assign[=] name[val] | keyword[def] identifier[set_ext_param] ( identifier[self] , identifier[ext_key] , identifier[param_key] , identifier[val] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[extensions] [ identifier[ext_key] ]:
identifier[self] . identifier[extensions] [ identifier[ext_key] ]= identifier[defaultdict] ( keyword[lambda] : keyword[None] )
identifier[self] . identifier[extensions] [ identifier[ext_key] ][ identifier[param_key] ]= identifier[val] | def set_ext_param(self, ext_key, param_key, val):
"""
Set the provided parameter in a set of extension parameters.
"""
if not self.extensions[ext_key]:
self.extensions[ext_key] = defaultdict(lambda : None) # depends on [control=['if'], data=[]]
self.extensions[ext_key][param_key] = val |
def _check_load_existing_object(self, object_type, id_field_name, operation='update'):
    # type: (str, str, str) -> None
    """Check metadata exists and contains HDX object identifier, and if so load HDX object

    Args:
        object_type (str): Description of HDX object type (for messages)
        id_field_name (str): Name of field containing HDX object identifier
        operation (str): Operation to report if error. Defaults to update.

    Returns:
        None
    """
    # Ensure the metadata carries the identifier field before loading.
    self._check_existing_object(object_type, id_field_name)
    identifier = self.data[id_field_name]
    loaded = self._load_from_hdx(object_type, identifier)
    if not loaded:
        raise HDXError('No existing %s to %s!' % (object_type, operation))
constant[Check metadata exists and contains HDX object identifier, and if so load HDX object
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
operation (str): Operation to report if error. Defaults to update.
Returns:
None
]
call[name[self]._check_existing_object, parameter[name[object_type], name[id_field_name]]]
if <ast.UnaryOp object at 0x7da1b0ff1a80> begin[:]
<ast.Raise object at 0x7da1b0ff0940> | keyword[def] identifier[_check_load_existing_object] ( identifier[self] , identifier[object_type] , identifier[id_field_name] , identifier[operation] = literal[string] ):
literal[string]
identifier[self] . identifier[_check_existing_object] ( identifier[object_type] , identifier[id_field_name] )
keyword[if] keyword[not] identifier[self] . identifier[_load_from_hdx] ( identifier[object_type] , identifier[self] . identifier[data] [ identifier[id_field_name] ]):
keyword[raise] identifier[HDXError] ( literal[string] %( identifier[object_type] , identifier[operation] )) | def _check_load_existing_object(self, object_type, id_field_name, operation='update'):
# type: (str, str, str) -> None
'Check metadata exists and contains HDX object identifier, and if so load HDX object\n\n Args:\n object_type (str): Description of HDX object type (for messages)\n id_field_name (str): Name of field containing HDX object identifier\n operation (str): Operation to report if error. Defaults to update.\n\n Returns:\n None\n '
self._check_existing_object(object_type, id_field_name)
if not self._load_from_hdx(object_type, self.data[id_field_name]):
raise HDXError('No existing %s to %s!' % (object_type, operation)) # depends on [control=['if'], data=[]] |
def scheduled_snapshot(name, prefix, recursive=True, schedule=None):
    '''
    maintain a set of snapshots based on a schedule

    name : string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    recursive : boolean
        create snapshots for all children also
    schedule : dict
        dict holding the schedule, the following keys are available (minute, hour,
        day, month, and year) by default all are set to 0 the value indicated the
        number of snapshots of that type to keep around.

    .. warning::

        snapshots will only be created and pruned every time the state runs.
        a schedule must be setup to automatically run the state. this means that if
        you run the state daily the hourly snapshot will only be made once per day!

    .. versionchanged:: 2018.3.0

        switched to localtime from gmtime so times now take into account timezones.

    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    ## initialize defaults
    # BUG FIX: the default schedule=None previously made ``hold not in schedule``
    # raise TypeError; treat a missing schedule as empty so validation reports
    # a proper error instead of a traceback.
    if schedule is None:
        schedule = {}
    schedule_holds = ['minute', 'hour', 'day', 'month', 'year']
    snapshots = OrderedDict([
        ('_create', OrderedDict()),
        ('_prunable', []),
        ('_schedule', OrderedDict()),
    ])

    ## strict configuration validation
    ## NOTE: we need a valid dataset
    if not __utils__['zfs.is_dataset'](name):
        ret['result'] = False
        ret['comment'] = 'invalid dataset name: {0}'.format(name)

    if not __salt__['zfs.exists'](name, **{'type': 'filesystem,volume'}):
        ret['comment'] = 'dataset {0} does not exist'.format(name)
        ret['result'] = False

    ## NOTE: prefix must be 4 or longer
    if not prefix or len(prefix) < 4:
        ret['comment'] = 'prefix ({0}) must be at least 4 long'.format(prefix)
        ret['result'] = False

    ## NOTE: validate schedule
    total_count = 0
    for hold in schedule_holds:
        snapshots[hold] = []
        if hold not in schedule:
            snapshots['_schedule'][hold] = 0
        elif isinstance(schedule[hold], int):
            snapshots['_schedule'][hold] = schedule[hold]
        else:
            ret['result'] = False
            ret['comment'] = 'schedule value for {0} is not an integer'.format(
                hold,
            )
            break
        total_count += snapshots['_schedule'][hold]
    if ret['result'] and total_count == 0:
        ret['result'] = False
        ret['comment'] = 'schedule is not valid, you need to keep atleast 1 snapshot'

    ## NOTE: return if configuration is not valid
    if not ret['result']:
        return ret

    ## retrieve existing snapshots
    snapshots = _schedule_snapshot_retrieve(name, prefix, snapshots)

    ## prepare snapshot
    snapshots = _schedule_snapshot_prepare(name, prefix, snapshots)

    ## log configuration
    log.debug('zfs.scheduled_snapshot::%s::config::recursive = %s',
              name, recursive)
    log.debug('zfs.scheduled_snapshot::%s::config::prefix = %s',
              name, prefix)
    log.debug('zfs.scheduled_snapshot::%s::snapshots = %s',
              name, snapshots)

    ## create snapshot(s)
    for snapshot_name, snapshot_holds in snapshots['_create'].items():
        ## NOTE: skip if new snapshot has no holds
        if not snapshot_holds:
            continue

        ## NOTE: create snapshot
        if not __opts__['test']:
            mod_res = __salt__['zfs.snapshot'](snapshot_name, **{'recursive': recursive})
        else:
            mod_res = OrderedDict([('snapshotted', True)])

        if not mod_res['snapshotted']:
            ret['result'] = False
            ret['comment'] = 'error creating snapshot ({0})'.format(snapshot_name)
        else:
            ## NOTE: create holds (if we have a snapshot)
            for hold in snapshot_holds:
                if not __opts__['test']:
                    mod_res = __salt__['zfs.hold'](hold, snapshot_name, **{'recursive': recursive})
                else:
                    mod_res = OrderedDict([('held', True)])

                if not mod_res['held']:
                    ret['result'] = False
                    ret['comment'] = "error adding hold ({0}) to snapshot ({1})".format(
                        hold,
                        snapshot_name,
                    )
                    break

                snapshots[hold].append(snapshot_name)

        if ret['result']:
            ret['comment'] = 'scheduled snapshots updated'
            if 'created' not in ret['changes']:
                ret['changes']['created'] = []
            ret['changes']['created'].append(snapshot_name)

    ## prune hold(s)
    for hold, hold_count in snapshots['_schedule'].items():
        while ret['result'] and len(snapshots[hold]) > hold_count:
            ## NOTE: pop oldest snapshot
            snapshot_name = snapshots[hold].pop(0)

            ## NOTE: release hold for snapshot
            if not __opts__['test']:
                mod_res = __salt__['zfs.release'](hold, snapshot_name, **{'recursive': recursive})
            else:
                mod_res = OrderedDict([('released', True)])

            if not mod_res['released']:
                ret['result'] = False
                # BUG FIX: this message previously said a hold was being
                # "added" while this loop releases holds.
                ret['comment'] = "error releasing hold ({0}) from snapshot ({1})".format(
                    hold,
                    snapshot_name,
                )

            ## NOTE: mark as prunable once no holds remain
            if not __salt__['zfs.holds'](snapshot_name):
                snapshots['_prunable'].append(snapshot_name)

    ## prune snapshot(s)
    for snapshot_name in snapshots['_prunable']:
        ## NOTE: destroy snapshot
        if not __opts__['test']:
            mod_res = __salt__['zfs.destroy'](snapshot_name, **{'recursive': recursive})
        else:
            mod_res = OrderedDict([('destroyed', True)])

        if not mod_res['destroyed']:
            ret['result'] = False
            # BUG FIX: the original used the out-of-range positional field {1}
            # with a single format argument ("error prunding snapshot ({1})"),
            # which raised IndexError exactly when the error should be reported.
            ret['comment'] = "error pruning snapshot ({0})".format(
                snapshot_name,
            )
            break

    if ret['result'] and snapshots['_prunable']:
        ret['comment'] = 'scheduled snapshots updated'
        ret['changes']['pruned'] = snapshots['_prunable']

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'scheduled snapshots are up to date'

    return ret
constant[
maintain a set of snapshots based on a schedule
name : string
name of filesystem or volume
prefix : string
prefix for the snapshots
e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
recursive : boolean
create snapshots for all children also
schedule : dict
dict holding the schedule, the following keys are available (minute, hour,
day, month, and year) by default all are set to 0 the value indicated the
number of snapshots of that type to keep around.
.. warning::
snapshots will only be created and pruned every time the state runs.
a schedule must be setup to automatically run the state. this means that if
you run the state daily the hourly snapshot will only be made once per day!
.. versionchanged:: 2018.3.0
switched to localtime from gmtime so times now take into account timezones.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2102710>, <ast.Constant object at 0x7da1b21026e0>, <ast.Constant object at 0x7da1b21026b0>, <ast.Constant object at 0x7da1b2102680>], [<ast.Name object at 0x7da1b2102650>, <ast.Dict object at 0x7da1b2102620>, <ast.Constant object at 0x7da1b21025f0>, <ast.Constant object at 0x7da1b21025c0>]]
variable[schedule_holds] assign[=] list[[<ast.Constant object at 0x7da1b21024d0>, <ast.Constant object at 0x7da1b21024a0>, <ast.Constant object at 0x7da1b2102470>, <ast.Constant object at 0x7da1b2102440>, <ast.Constant object at 0x7da1b2102410>]]
variable[snapshots] assign[=] call[name[OrderedDict], parameter[list[[<ast.Tuple object at 0x7da1b21022f0>, <ast.Tuple object at 0x7da1b2102230>, <ast.Tuple object at 0x7da1b21021a0>]]]]
if <ast.UnaryOp object at 0x7da1b21020b0> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[invalid dataset name: {0}].format, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da1b2101cf0> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[dataset {0} does not exist].format, parameter[name[name]]]
call[name[ret]][constant[result]] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b2101870> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[prefix ({0}) must be at least 4 long].format, parameter[name[prefix]]]
call[name[ret]][constant[result]] assign[=] constant[False]
variable[total_count] assign[=] constant[0]
for taget[name[hold]] in starred[name[schedule_holds]] begin[:]
call[name[snapshots]][name[hold]] assign[=] list[[]]
if compare[name[hold] <ast.NotIn object at 0x7da2590d7190> name[schedule]] begin[:]
call[call[name[snapshots]][constant[_schedule]]][name[hold]] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b2160b20>
if <ast.BoolOp object at 0x7da1b2161270> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] constant[schedule is not valid, you need to keep atleast 1 snapshot]
if <ast.UnaryOp object at 0x7da1b2160640> begin[:]
return[name[ret]]
variable[snapshots] assign[=] call[name[_schedule_snapshot_retrieve], parameter[name[name], name[prefix], name[snapshots]]]
variable[snapshots] assign[=] call[name[_schedule_snapshot_prepare], parameter[name[name], name[prefix], name[snapshots]]]
call[name[log].debug, parameter[constant[zfs.scheduled_snapshot::%s::config::recursive = %s], name[name], name[recursive]]]
call[name[log].debug, parameter[constant[zfs.scheduled_snapshot::%s::config::prefix = %s], name[name], name[prefix]]]
call[name[log].debug, parameter[constant[zfs.scheduled_snapshot::%s::snapshots = %s], name[name], name[snapshots]]]
for taget[tuple[[<ast.Name object at 0x7da1b2160910>, <ast.Name object at 0x7da1b2161360>]]] in starred[call[call[name[snapshots]][constant[_create]].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b2161570> begin[:]
continue
if <ast.UnaryOp object at 0x7da1b2161600> begin[:]
variable[mod_res] assign[=] call[call[name[__salt__]][constant[zfs.snapshot]], parameter[name[snapshot_name]]]
if <ast.UnaryOp object at 0x7da1b2100cd0> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[error creating snapshot ({0})].format, parameter[name[snapshot_name]]]
if call[name[ret]][constant[result]] begin[:]
call[name[ret]][constant[comment]] assign[=] constant[scheduled snapshots updated]
if compare[constant[created] <ast.NotIn object at 0x7da2590d7190> call[name[ret]][constant[changes]]] begin[:]
call[call[name[ret]][constant[changes]]][constant[created]] assign[=] list[[]]
call[call[call[name[ret]][constant[changes]]][constant[created]].append, parameter[name[snapshot_name]]]
for taget[tuple[[<ast.Name object at 0x7da1b1f35420>, <ast.Name object at 0x7da1b1f35d50>]]] in starred[call[call[name[snapshots]][constant[_schedule]].items, parameter[]]] begin[:]
while <ast.BoolOp object at 0x7da1b1f357b0> begin[:]
variable[snapshot_name] assign[=] call[call[name[snapshots]][name[hold]].pop, parameter[constant[0]]]
if <ast.UnaryOp object at 0x7da1b1f36e60> begin[:]
variable[mod_res] assign[=] call[call[name[__salt__]][constant[zfs.release]], parameter[name[hold], name[snapshot_name]]]
if <ast.UnaryOp object at 0x7da1b1f35360> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[error adding hold ({0}) to snapshot ({1})].format, parameter[name[hold], name[snapshot_name]]]
if <ast.UnaryOp object at 0x7da1b21f8400> begin[:]
call[call[name[snapshots]][constant[_prunable]].append, parameter[name[snapshot_name]]]
for taget[name[snapshot_name]] in starred[call[name[snapshots]][constant[_prunable]]] begin[:]
if <ast.UnaryOp object at 0x7da1b21fbfd0> begin[:]
variable[mod_res] assign[=] call[call[name[__salt__]][constant[zfs.destroy]], parameter[name[snapshot_name]]]
if <ast.UnaryOp object at 0x7da1b21f97e0> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[error prunding snapshot ({1})].format, parameter[name[snapshot_name]]]
break
if <ast.BoolOp object at 0x7da1b21f8970> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[scheduled snapshots updated]
call[call[name[ret]][constant[changes]]][constant[pruned]] assign[=] call[name[snapshots]][constant[_prunable]]
if <ast.BoolOp object at 0x7da1b21f9cf0> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[scheduled snapshots are up to date]
return[name[ret]] | keyword[def] identifier[scheduled_snapshot] ( identifier[name] , identifier[prefix] , identifier[recursive] = keyword[True] , identifier[schedule] = keyword[None] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] }
identifier[schedule_holds] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[snapshots] = identifier[OrderedDict] ([
( literal[string] , identifier[OrderedDict] ()),
( literal[string] ,[]),
( literal[string] , identifier[OrderedDict] ()),
])
keyword[if] keyword[not] identifier[__utils__] [ literal[string] ]( identifier[name] ):
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[if] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[name] ,**{ literal[string] : literal[string] }):
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[False]
keyword[if] keyword[not] identifier[prefix] keyword[or] identifier[len] ( identifier[prefix] )< literal[int] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[prefix] )
identifier[ret] [ literal[string] ]= keyword[False]
identifier[total_count] = literal[int]
keyword[for] identifier[hold] keyword[in] identifier[schedule_holds] :
identifier[snapshots] [ identifier[hold] ]=[]
keyword[if] identifier[hold] keyword[not] keyword[in] identifier[schedule] :
identifier[snapshots] [ literal[string] ][ identifier[hold] ]= literal[int]
keyword[elif] identifier[isinstance] ( identifier[schedule] [ identifier[hold] ], identifier[int] ):
identifier[snapshots] [ literal[string] ][ identifier[hold] ]= identifier[schedule] [ identifier[hold] ]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[hold] ,
)
keyword[break]
identifier[total_count] += identifier[snapshots] [ literal[string] ][ identifier[hold] ]
keyword[if] identifier[ret] [ literal[string] ] keyword[and] identifier[total_count] == literal[int] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[if] keyword[not] identifier[ret] [ literal[string] ]:
keyword[return] identifier[ret]
identifier[snapshots] = identifier[_schedule_snapshot_retrieve] ( identifier[name] , identifier[prefix] , identifier[snapshots] )
identifier[snapshots] = identifier[_schedule_snapshot_prepare] ( identifier[name] , identifier[prefix] , identifier[snapshots] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[name] , identifier[recursive] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[name] , identifier[prefix] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[name] , identifier[snapshots] )
keyword[for] identifier[snapshot_name] , identifier[snapshot_holds] keyword[in] identifier[snapshots] [ literal[string] ]. identifier[items] ():
keyword[if] keyword[not] identifier[snapshot_holds] :
keyword[continue]
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ]:
identifier[mod_res] = identifier[__salt__] [ literal[string] ]( identifier[snapshot_name] ,**{ literal[string] : identifier[recursive] })
keyword[else] :
identifier[mod_res] = identifier[OrderedDict] ([( literal[string] , keyword[True] )])
keyword[if] keyword[not] identifier[mod_res] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[snapshot_name] )
keyword[else] :
keyword[for] identifier[hold] keyword[in] identifier[snapshot_holds] :
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ]:
identifier[mod_res] = identifier[__salt__] [ literal[string] ]( identifier[hold] , identifier[snapshot_name] ,**{ literal[string] : identifier[recursive] })
keyword[else] :
identifier[mod_res] = identifier[OrderedDict] ([( literal[string] , keyword[True] )])
keyword[if] keyword[not] identifier[mod_res] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[hold] ,
identifier[snapshot_name] ,
)
keyword[break]
identifier[snapshots] [ identifier[hold] ]. identifier[append] ( identifier[snapshot_name] )
keyword[if] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ][ literal[string] ]=[]
identifier[ret] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[snapshot_name] )
keyword[for] identifier[hold] , identifier[hold_count] keyword[in] identifier[snapshots] [ literal[string] ]. identifier[items] ():
keyword[while] identifier[ret] [ literal[string] ] keyword[and] identifier[len] ( identifier[snapshots] [ identifier[hold] ])> identifier[hold_count] :
identifier[snapshot_name] = identifier[snapshots] [ identifier[hold] ]. identifier[pop] ( literal[int] )
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ]:
identifier[mod_res] = identifier[__salt__] [ literal[string] ]( identifier[hold] , identifier[snapshot_name] ,**{ literal[string] : identifier[recursive] })
keyword[else] :
identifier[mod_res] = identifier[OrderedDict] ([( literal[string] , keyword[True] )])
keyword[if] keyword[not] identifier[mod_res] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[hold] ,
identifier[snapshot_name] ,
)
keyword[if] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[snapshot_name] ):
identifier[snapshots] [ literal[string] ]. identifier[append] ( identifier[snapshot_name] )
keyword[for] identifier[snapshot_name] keyword[in] identifier[snapshots] [ literal[string] ]:
keyword[if] keyword[not] identifier[__opts__] [ literal[string] ]:
identifier[mod_res] = identifier[__salt__] [ literal[string] ]( identifier[snapshot_name] ,**{ literal[string] : identifier[recursive] })
keyword[else] :
identifier[mod_res] = identifier[OrderedDict] ([( literal[string] , keyword[True] )])
keyword[if] keyword[not] identifier[mod_res] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[snapshot_name] ,
)
keyword[break]
keyword[if] identifier[ret] [ literal[string] ] keyword[and] identifier[snapshots] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ][ literal[string] ]= identifier[snapshots] [ literal[string] ]
keyword[if] identifier[ret] [ literal[string] ] keyword[and] keyword[not] identifier[ret] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def scheduled_snapshot(name, prefix, recursive=True, schedule=None):
"""
maintain a set of snapshots based on a schedule
name : string
name of filesystem or volume
prefix : string
prefix for the snapshots
e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
recursive : boolean
create snapshots for all children also
schedule : dict
dict holding the schedule, the following keys are available (minute, hour,
day, month, and year) by default all are set to 0 the value indicated the
number of snapshots of that type to keep around.
.. warning::
snapshots will only be created and pruned every time the state runs.
a schedule must be setup to automatically run the state. this means that if
you run the state daily the hourly snapshot will only be made once per day!
.. versionchanged:: 2018.3.0
switched to localtime from gmtime so times now take into account timezones.
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
## initialize defaults
schedule_holds = ['minute', 'hour', 'day', 'month', 'year']
snapshots = OrderedDict([('_create', OrderedDict()), ('_prunable', []), ('_schedule', OrderedDict())])
## strict configuration validation
## NOTE: we need a valid dataset
if not __utils__['zfs.is_dataset'](name):
ret['result'] = False
ret['comment'] = 'invalid dataset name: {0}'.format(name) # depends on [control=['if'], data=[]]
if not __salt__['zfs.exists'](name, **{'type': 'filesystem,volume'}):
ret['comment'] = 'dataset {0} does not exist'.format(name)
ret['result'] = False # depends on [control=['if'], data=[]]
## NOTE: prefix must be 4 or longer
if not prefix or len(prefix) < 4:
ret['comment'] = 'prefix ({0}) must be at least 4 long'.format(prefix)
ret['result'] = False # depends on [control=['if'], data=[]]
## NOTE: validate schedule
total_count = 0
for hold in schedule_holds:
snapshots[hold] = []
if hold not in schedule:
snapshots['_schedule'][hold] = 0 # depends on [control=['if'], data=['hold']]
elif isinstance(schedule[hold], int):
snapshots['_schedule'][hold] = schedule[hold] # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'schedule value for {0} is not an integer'.format(hold)
break
total_count += snapshots['_schedule'][hold] # depends on [control=['for'], data=['hold']]
if ret['result'] and total_count == 0:
ret['result'] = False
ret['comment'] = 'schedule is not valid, you need to keep atleast 1 snapshot' # depends on [control=['if'], data=[]]
## NOTE: return if configuration is not valid
if not ret['result']:
return ret # depends on [control=['if'], data=[]]
## retrieve existing snapshots
snapshots = _schedule_snapshot_retrieve(name, prefix, snapshots)
## prepare snapshot
snapshots = _schedule_snapshot_prepare(name, prefix, snapshots)
## log configuration
log.debug('zfs.scheduled_snapshot::%s::config::recursive = %s', name, recursive)
log.debug('zfs.scheduled_snapshot::%s::config::prefix = %s', name, prefix)
log.debug('zfs.scheduled_snapshot::%s::snapshots = %s', name, snapshots)
## create snapshot(s)
for (snapshot_name, snapshot_holds) in snapshots['_create'].items():
## NOTE: skip if new snapshot has no holds
if not snapshot_holds:
continue # depends on [control=['if'], data=[]]
## NOTE: create snapshot
if not __opts__['test']:
mod_res = __salt__['zfs.snapshot'](snapshot_name, **{'recursive': recursive}) # depends on [control=['if'], data=[]]
else:
mod_res = OrderedDict([('snapshotted', True)])
if not mod_res['snapshotted']:
ret['result'] = False
ret['comment'] = 'error creating snapshot ({0})'.format(snapshot_name) # depends on [control=['if'], data=[]]
else:
## NOTE: create holds (if we have a snapshot)
for hold in snapshot_holds:
if not __opts__['test']:
mod_res = __salt__['zfs.hold'](hold, snapshot_name, **{'recursive': recursive}) # depends on [control=['if'], data=[]]
else:
mod_res = OrderedDict([('held', True)])
if not mod_res['held']:
ret['result'] = False
ret['comment'] = 'error adding hold ({0}) to snapshot ({1})'.format(hold, snapshot_name)
break # depends on [control=['if'], data=[]]
snapshots[hold].append(snapshot_name) # depends on [control=['for'], data=['hold']]
if ret['result']:
ret['comment'] = 'scheduled snapshots updated'
if 'created' not in ret['changes']:
ret['changes']['created'] = [] # depends on [control=['if'], data=[]]
ret['changes']['created'].append(snapshot_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
## prune hold(s)
for (hold, hold_count) in snapshots['_schedule'].items():
while ret['result'] and len(snapshots[hold]) > hold_count:
## NOTE: pop oldest snapshot
snapshot_name = snapshots[hold].pop(0)
## NOTE: release hold for snapshot
if not __opts__['test']:
mod_res = __salt__['zfs.release'](hold, snapshot_name, **{'recursive': recursive}) # depends on [control=['if'], data=[]]
else:
mod_res = OrderedDict([('released', True)])
if not mod_res['released']:
ret['result'] = False
ret['comment'] = 'error adding hold ({0}) to snapshot ({1})'.format(hold, snapshot_name) # depends on [control=['if'], data=[]]
## NOTE: mark as prunable
if not __salt__['zfs.holds'](snapshot_name):
snapshots['_prunable'].append(snapshot_name) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['for'], data=[]]
## prune snapshot(s)
for snapshot_name in snapshots['_prunable']:
## NOTE: destroy snapshot
if not __opts__['test']:
mod_res = __salt__['zfs.destroy'](snapshot_name, **{'recursive': recursive}) # depends on [control=['if'], data=[]]
else:
mod_res = OrderedDict([('destroyed', True)])
if not mod_res['destroyed']:
ret['result'] = False
ret['comment'] = 'error prunding snapshot ({1})'.format(snapshot_name)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['snapshot_name']]
if ret['result'] and snapshots['_prunable']:
ret['comment'] = 'scheduled snapshots updated'
ret['changes']['pruned'] = snapshots['_prunable'] # depends on [control=['if'], data=[]]
if ret['result'] and (not ret['changes']):
ret['comment'] = 'scheduled snapshots are up to date' # depends on [control=['if'], data=[]]
return ret |
def database_caller_creator(self, host, port, name=None):
'''creates a mongodb database
returns the related connection object
which will be later used to spawn the cursor
'''
client = pymongo.MongoClient(host, port)
if name:
db = client[name]
else:
db = client['mongodb_' + str_generator(self)]
return db | def function[database_caller_creator, parameter[self, host, port, name]]:
constant[creates a mongodb database
returns the related connection object
which will be later used to spawn the cursor
]
variable[client] assign[=] call[name[pymongo].MongoClient, parameter[name[host], name[port]]]
if name[name] begin[:]
variable[db] assign[=] call[name[client]][name[name]]
return[name[db]] | keyword[def] identifier[database_caller_creator] ( identifier[self] , identifier[host] , identifier[port] , identifier[name] = keyword[None] ):
literal[string]
identifier[client] = identifier[pymongo] . identifier[MongoClient] ( identifier[host] , identifier[port] )
keyword[if] identifier[name] :
identifier[db] = identifier[client] [ identifier[name] ]
keyword[else] :
identifier[db] = identifier[client] [ literal[string] + identifier[str_generator] ( identifier[self] )]
keyword[return] identifier[db] | def database_caller_creator(self, host, port, name=None):
"""creates a mongodb database
returns the related connection object
which will be later used to spawn the cursor
"""
client = pymongo.MongoClient(host, port)
if name:
db = client[name] # depends on [control=['if'], data=[]]
else:
db = client['mongodb_' + str_generator(self)]
return db |
def auto_open(self, state=None):
"""Get or set automatic TCP connect mode
:param state: auto_open state or None for get value
:type state: bool or None
:returns: auto_open state or None if set fail
:rtype: bool or None
"""
if state is None:
return self.__auto_open
self.__auto_open = bool(state)
return self.__auto_open | def function[auto_open, parameter[self, state]]:
constant[Get or set automatic TCP connect mode
:param state: auto_open state or None for get value
:type state: bool or None
:returns: auto_open state or None if set fail
:rtype: bool or None
]
if compare[name[state] is constant[None]] begin[:]
return[name[self].__auto_open]
name[self].__auto_open assign[=] call[name[bool], parameter[name[state]]]
return[name[self].__auto_open] | keyword[def] identifier[auto_open] ( identifier[self] , identifier[state] = keyword[None] ):
literal[string]
keyword[if] identifier[state] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[__auto_open]
identifier[self] . identifier[__auto_open] = identifier[bool] ( identifier[state] )
keyword[return] identifier[self] . identifier[__auto_open] | def auto_open(self, state=None):
"""Get or set automatic TCP connect mode
:param state: auto_open state or None for get value
:type state: bool or None
:returns: auto_open state or None if set fail
:rtype: bool or None
"""
if state is None:
return self.__auto_open # depends on [control=['if'], data=[]]
self.__auto_open = bool(state)
return self.__auto_open |
def get_target_temp(self):
"""
Get the target temperature.
"""
if not self._connected:
return
temp_ovrd = self._protocol.status.get(DATA_ROOM_SETPOINT_OVRD)
if temp_ovrd:
return temp_ovrd
return self._protocol.status.get(DATA_ROOM_SETPOINT) | def function[get_target_temp, parameter[self]]:
constant[
Get the target temperature.
]
if <ast.UnaryOp object at 0x7da18c4ccf10> begin[:]
return[None]
variable[temp_ovrd] assign[=] call[name[self]._protocol.status.get, parameter[name[DATA_ROOM_SETPOINT_OVRD]]]
if name[temp_ovrd] begin[:]
return[name[temp_ovrd]]
return[call[name[self]._protocol.status.get, parameter[name[DATA_ROOM_SETPOINT]]]] | keyword[def] identifier[get_target_temp] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_connected] :
keyword[return]
identifier[temp_ovrd] = identifier[self] . identifier[_protocol] . identifier[status] . identifier[get] ( identifier[DATA_ROOM_SETPOINT_OVRD] )
keyword[if] identifier[temp_ovrd] :
keyword[return] identifier[temp_ovrd]
keyword[return] identifier[self] . identifier[_protocol] . identifier[status] . identifier[get] ( identifier[DATA_ROOM_SETPOINT] ) | def get_target_temp(self):
"""
Get the target temperature.
"""
if not self._connected:
return # depends on [control=['if'], data=[]]
temp_ovrd = self._protocol.status.get(DATA_ROOM_SETPOINT_OVRD)
if temp_ovrd:
return temp_ovrd # depends on [control=['if'], data=[]]
return self._protocol.status.get(DATA_ROOM_SETPOINT) |
def show(self, *args, **kwargs):
"""
Shows the chart URL in a webbrowser
Other arguments passed to webbrowser.open
"""
from webbrowser import open as webopen
return webopen(str(self), *args, **kwargs) | def function[show, parameter[self]]:
constant[
Shows the chart URL in a webbrowser
Other arguments passed to webbrowser.open
]
from relative_module[webbrowser] import module[open]
return[call[name[webopen], parameter[call[name[str], parameter[name[self]]], <ast.Starred object at 0x7da1b021f940>]]] | keyword[def] identifier[show] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[webbrowser] keyword[import] identifier[open] keyword[as] identifier[webopen]
keyword[return] identifier[webopen] ( identifier[str] ( identifier[self] ),* identifier[args] ,** identifier[kwargs] ) | def show(self, *args, **kwargs):
"""
Shows the chart URL in a webbrowser
Other arguments passed to webbrowser.open
"""
from webbrowser import open as webopen
return webopen(str(self), *args, **kwargs) |
def ip_monitor(self, query, days_back=0, page=1, **kwargs):
"""Pass in the IP Address you wish to query ( i.e. 199.30.228.112 )."""
return self._results('ip-monitor', '/v1/ip-monitor', query=query, days_back=days_back, page=page,
items_path=('alerts', ), **kwargs) | def function[ip_monitor, parameter[self, query, days_back, page]]:
constant[Pass in the IP Address you wish to query ( i.e. 199.30.228.112 ).]
return[call[name[self]._results, parameter[constant[ip-monitor], constant[/v1/ip-monitor]]]] | keyword[def] identifier[ip_monitor] ( identifier[self] , identifier[query] , identifier[days_back] = literal[int] , identifier[page] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_results] ( literal[string] , literal[string] , identifier[query] = identifier[query] , identifier[days_back] = identifier[days_back] , identifier[page] = identifier[page] ,
identifier[items_path] =( literal[string] ,),** identifier[kwargs] ) | def ip_monitor(self, query, days_back=0, page=1, **kwargs):
"""Pass in the IP Address you wish to query ( i.e. 199.30.228.112 )."""
return self._results('ip-monitor', '/v1/ip-monitor', query=query, days_back=days_back, page=page, items_path=('alerts',), **kwargs) |
async def create(source_id: str, proof_request: str):
"""
Create a proof for fulfilling a corresponding proof request
:param source_id: Tag associated by user of sdk
:param proof_request: Proof Request data sent by requestor.
Example:
source_id = 'sourceId'
request = {
"@topic": {
"mid": 9,
"tid": 1
},
"@type": {
"name": "PROOF_REQUEST",
"version":"1.0"
},
"msg_ref_id": "ymy5nth",
"proof_request_data": {
"name": "Account Certificate",
"nonce": "838186471541979035208225",
"requested_attributes": {
"business_2": {
"name": "business"
},
"email_1": {
"name": "email"
},
"name_0": {
"name": "name"
}
},
"requested_predicates": {},
"version": "0.1"
}
}
disclosed_proof = await DisclosedProof.create(source_id, request)
:return: Disclosed Proof Object
"""
constructor_params = (source_id,)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_proof_request = c_char_p(json.dumps(proof_request).encode('utf-8'))
c_params = (c_source_id, c_proof_request, )
return await DisclosedProof._create("vcx_disclosed_proof_create_with_request",
constructor_params,
c_params) | <ast.AsyncFunctionDef object at 0x7da207f039d0> | keyword[async] keyword[def] identifier[create] ( identifier[source_id] : identifier[str] , identifier[proof_request] : identifier[str] ):
literal[string]
identifier[constructor_params] =( identifier[source_id] ,)
identifier[c_source_id] = identifier[c_char_p] ( identifier[source_id] . identifier[encode] ( literal[string] ))
identifier[c_proof_request] = identifier[c_char_p] ( identifier[json] . identifier[dumps] ( identifier[proof_request] ). identifier[encode] ( literal[string] ))
identifier[c_params] =( identifier[c_source_id] , identifier[c_proof_request] ,)
keyword[return] keyword[await] identifier[DisclosedProof] . identifier[_create] ( literal[string] ,
identifier[constructor_params] ,
identifier[c_params] ) | async def create(source_id: str, proof_request: str):
"""
Create a proof for fulfilling a corresponding proof request
:param source_id: Tag associated by user of sdk
:param proof_request: Proof Request data sent by requestor.
Example:
source_id = 'sourceId'
request = {
"@topic": {
"mid": 9,
"tid": 1
},
"@type": {
"name": "PROOF_REQUEST",
"version":"1.0"
},
"msg_ref_id": "ymy5nth",
"proof_request_data": {
"name": "Account Certificate",
"nonce": "838186471541979035208225",
"requested_attributes": {
"business_2": {
"name": "business"
},
"email_1": {
"name": "email"
},
"name_0": {
"name": "name"
}
},
"requested_predicates": {},
"version": "0.1"
}
}
disclosed_proof = await DisclosedProof.create(source_id, request)
:return: Disclosed Proof Object
"""
constructor_params = (source_id,)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_proof_request = c_char_p(json.dumps(proof_request).encode('utf-8'))
c_params = (c_source_id, c_proof_request)
return await DisclosedProof._create('vcx_disclosed_proof_create_with_request', constructor_params, c_params) |
def _create_state_data(self, context, resp_args, relay_state):
"""
Returns a dict containing the state needed in the response flow.
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
:param context: The current context
:param resp_args: Response arguments
:param relay_state: Request relay state
:return: A state as a dict
"""
if "name_id_policy" in resp_args and resp_args["name_id_policy"] is not None:
resp_args["name_id_policy"] = resp_args["name_id_policy"].to_string().decode("utf-8")
return {"resp_args": resp_args, "relay_state": relay_state} | def function[_create_state_data, parameter[self, context, resp_args, relay_state]]:
constant[
Returns a dict containing the state needed in the response flow.
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
:param context: The current context
:param resp_args: Response arguments
:param relay_state: Request relay state
:return: A state as a dict
]
if <ast.BoolOp object at 0x7da1b1502530> begin[:]
call[name[resp_args]][constant[name_id_policy]] assign[=] call[call[call[name[resp_args]][constant[name_id_policy]].to_string, parameter[]].decode, parameter[constant[utf-8]]]
return[dictionary[[<ast.Constant object at 0x7da1b1500220>, <ast.Constant object at 0x7da1b1503760>], [<ast.Name object at 0x7da1b1502470>, <ast.Name object at 0x7da1b1501ab0>]]] | keyword[def] identifier[_create_state_data] ( identifier[self] , identifier[context] , identifier[resp_args] , identifier[relay_state] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[resp_args] keyword[and] identifier[resp_args] [ literal[string] ] keyword[is] keyword[not] keyword[None] :
identifier[resp_args] [ literal[string] ]= identifier[resp_args] [ literal[string] ]. identifier[to_string] (). identifier[decode] ( literal[string] )
keyword[return] { literal[string] : identifier[resp_args] , literal[string] : identifier[relay_state] } | def _create_state_data(self, context, resp_args, relay_state):
"""
Returns a dict containing the state needed in the response flow.
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
:param context: The current context
:param resp_args: Response arguments
:param relay_state: Request relay state
:return: A state as a dict
"""
if 'name_id_policy' in resp_args and resp_args['name_id_policy'] is not None:
resp_args['name_id_policy'] = resp_args['name_id_policy'].to_string().decode('utf-8') # depends on [control=['if'], data=[]]
return {'resp_args': resp_args, 'relay_state': relay_state} |
def render_response(self, sort=True):
    """Render the dict's Cookie objects into a list of strings formatted
    for HTTP response headers (detailed 'Set-Cookie: ' style).

    :param sort: when True (default) the rendered strings are returned in
        sorted order; otherwise in the dict's iteration order.
    """
    lines = []
    for cookie in self.values():
        lines.append(cookie.render_response())
    if sort:
        lines.sort()
    return lines
constant[Render the dict's Cookie objects into list of strings formatted for
HTTP response headers (detailed 'Set-Cookie: ' style).
]
variable[rendered] assign[=] <ast.ListComp object at 0x7da18f00eda0>
return[<ast.IfExp object at 0x7da18f00f7c0>] | keyword[def] identifier[render_response] ( identifier[self] , identifier[sort] = keyword[True] ):
literal[string]
identifier[rendered] =[ identifier[cookie] . identifier[render_response] () keyword[for] identifier[cookie] keyword[in] identifier[self] . identifier[values] ()]
keyword[return] identifier[rendered] keyword[if] keyword[not] identifier[sort] keyword[else] identifier[sorted] ( identifier[rendered] ) | def render_response(self, sort=True):
"""Render the dict's Cookie objects into list of strings formatted for
HTTP response headers (detailed 'Set-Cookie: ' style).
"""
rendered = [cookie.render_response() for cookie in self.values()]
return rendered if not sort else sorted(rendered) |
def get_etags_and_matchers(self, request):
    """Get the etags from the header and perform a validation against the required preconditions.

    :param request: incoming HTTP request whose precondition headers
        (If-Match / If-None-Match) are inspected.
    :return: the (etags, matchers) extracted by the parent implementation.
    :raises: a 428 (Precondition Required) error from
        ``evaluate_preconditions`` when a required precondition header is
        missing.
    """
    # evaluate the preconditions, raises 428 if condition is not met
    self.evaluate_preconditions(request)
    # alright, headers are present, extract the values and match the conditions
    return super(APIETAGProcessor, self).get_etags_and_matchers(request)
constant[Get the etags from the header and perform a validation against the required preconditions.]
call[name[self].evaluate_preconditions, parameter[name[request]]]
return[call[call[name[super], parameter[name[APIETAGProcessor], name[self]]].get_etags_and_matchers, parameter[name[request]]]] | keyword[def] identifier[get_etags_and_matchers] ( identifier[self] , identifier[request] ):
literal[string]
identifier[self] . identifier[evaluate_preconditions] ( identifier[request] )
keyword[return] identifier[super] ( identifier[APIETAGProcessor] , identifier[self] ). identifier[get_etags_and_matchers] ( identifier[request] ) | def get_etags_and_matchers(self, request):
"""Get the etags from the header and perform a validation against the required preconditions."""
# evaluate the preconditions, raises 428 if condition is not met
self.evaluate_preconditions(request)
# alright, headers are present, extract the values and match the conditions
return super(APIETAGProcessor, self).get_etags_and_matchers(request) |
def update_free_item_coupon_by_id(cls, free_item_coupon_id, free_item_coupon, **kwargs):
    """Update FreeItemCoupon

    Update attributes of FreeItemCoupon.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_free_item_coupon_by_id(free_item_coupon_id, free_item_coupon, async=True)
    >>> result = thread.get()

    :param async bool
    :param str free_item_coupon_id: ID of freeItemCoupon to update. (required)
    :param FreeItemCoupon free_item_coupon: Attributes of freeItemCoupon to update. (required)
    :return: FreeItemCoupon
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the same
    # private helper with identical arguments: with async=True the helper
    # returns the request thread, otherwise the deserialized response
    # data — so a single call covers both cases.
    return cls._update_free_item_coupon_by_id_with_http_info(
        free_item_coupon_id, free_item_coupon, **kwargs)
constant[Update FreeItemCoupon
Update attributes of FreeItemCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_free_item_coupon_by_id(free_item_coupon_id, free_item_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str free_item_coupon_id: ID of freeItemCoupon to update. (required)
:param FreeItemCoupon free_item_coupon: Attributes of freeItemCoupon to update. (required)
:return: FreeItemCoupon
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._update_free_item_coupon_by_id_with_http_info, parameter[name[free_item_coupon_id], name[free_item_coupon]]]] | keyword[def] identifier[update_free_item_coupon_by_id] ( identifier[cls] , identifier[free_item_coupon_id] , identifier[free_item_coupon] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_update_free_item_coupon_by_id_with_http_info] ( identifier[free_item_coupon_id] , identifier[free_item_coupon] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_update_free_item_coupon_by_id_with_http_info] ( identifier[free_item_coupon_id] , identifier[free_item_coupon] ,** identifier[kwargs] )
keyword[return] identifier[data] | def update_free_item_coupon_by_id(cls, free_item_coupon_id, free_item_coupon, **kwargs):
"""Update FreeItemCoupon
Update attributes of FreeItemCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_free_item_coupon_by_id(free_item_coupon_id, free_item_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str free_item_coupon_id: ID of freeItemCoupon to update. (required)
:param FreeItemCoupon free_item_coupon: Attributes of freeItemCoupon to update. (required)
:return: FreeItemCoupon
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_free_item_coupon_by_id_with_http_info(free_item_coupon_id, free_item_coupon, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._update_free_item_coupon_by_id_with_http_info(free_item_coupon_id, free_item_coupon, **kwargs)
return data |
def put_scancodes(self, scancodes):
    """Sends an array of scancodes to the keyboard.

    in scancodes of type int

    return codes_stored of type int

    raises :class:`VBoxErrorIprtError`
        Could not send all scan codes to virtual keyboard.
    """
    if not isinstance(scancodes, list):
        raise TypeError("scancodes can only be an instance of type list")
    # Spot-check the first ten entries only, mirroring the generated
    # binding's convention for validating large arrays.
    for code in scancodes[:10]:
        if not isinstance(code, baseinteger):
            raise TypeError(
                "array can only contain objects of type baseinteger")
    return self._call("putScancodes",
                      in_p=[scancodes])
constant[Sends an array of scancodes to the keyboard.
in scancodes of type int
return codes_stored of type int
raises :class:`VBoxErrorIprtError`
Could not send all scan codes to virtual keyboard.
]
if <ast.UnaryOp object at 0x7da2047e8370> begin[:]
<ast.Raise object at 0x7da2047e9ff0>
for taget[name[a]] in starred[call[name[scancodes]][<ast.Slice object at 0x7da204345cf0>]] begin[:]
if <ast.UnaryOp object at 0x7da2043453c0> begin[:]
<ast.Raise object at 0x7da204347190>
variable[codes_stored] assign[=] call[name[self]._call, parameter[constant[putScancodes]]]
return[name[codes_stored]] | keyword[def] identifier[put_scancodes] ( identifier[self] , identifier[scancodes] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[scancodes] , identifier[list] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[for] identifier[a] keyword[in] identifier[scancodes] [: literal[int] ]:
keyword[if] keyword[not] identifier[isinstance] ( identifier[a] , identifier[baseinteger] ):
keyword[raise] identifier[TypeError] (
literal[string] )
identifier[codes_stored] = identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[scancodes] ])
keyword[return] identifier[codes_stored] | def put_scancodes(self, scancodes):
"""Sends an array of scancodes to the keyboard.
in scancodes of type int
return codes_stored of type int
raises :class:`VBoxErrorIprtError`
Could not send all scan codes to virtual keyboard.
"""
if not isinstance(scancodes, list):
raise TypeError('scancodes can only be an instance of type list') # depends on [control=['if'], data=[]]
for a in scancodes[:10]:
if not isinstance(a, baseinteger):
raise TypeError('array can only contain objects of type baseinteger') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
codes_stored = self._call('putScancodes', in_p=[scancodes])
return codes_stored |
def _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None,
                       cert_contents=None, key_path=None, key_contents=None):
    """
    returns a list of NetJSON extra files for automatically generated clients
    produces side effects in ``client`` dictionary
    """
    files = []
    # (client key, file path, file contents) for each credential kind,
    # emitted in ca -> cert -> key order
    entries = (
        ('ca', ca_path, ca_contents),
        ('cert', cert_path, cert_contents),
        ('key', key_path, key_contents),
    )
    for key, path, contents in entries:
        # a file is produced only when both path and contents are supplied
        if path and contents:
            client[key] = path
            files.append(dict(path=path,
                              contents=contents,
                              mode=DEFAULT_FILE_MODE))
    return files
constant[
returns a list of NetJSON extra files for automatically generated clients
produces side effects in ``client`` dictionary
]
variable[files] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b0126980> begin[:]
call[name[client]][constant[ca]] assign[=] name[ca_path]
call[name[files].append, parameter[call[name[dict], parameter[]]]]
if <ast.BoolOp object at 0x7da1b01272b0> begin[:]
call[name[client]][constant[cert]] assign[=] name[cert_path]
call[name[files].append, parameter[call[name[dict], parameter[]]]]
if <ast.BoolOp object at 0x7da1b0127010> begin[:]
call[name[client]][constant[key]] assign[=] name[key_path]
call[name[files].append, parameter[call[name[dict], parameter[]]]]
return[name[files]] | keyword[def] identifier[_auto_client_files] ( identifier[cls] , identifier[client] , identifier[ca_path] = keyword[None] , identifier[ca_contents] = keyword[None] , identifier[cert_path] = keyword[None] ,
identifier[cert_contents] = keyword[None] , identifier[key_path] = keyword[None] , identifier[key_contents] = keyword[None] ):
literal[string]
identifier[files] =[]
keyword[if] identifier[ca_path] keyword[and] identifier[ca_contents] :
identifier[client] [ literal[string] ]= identifier[ca_path]
identifier[files] . identifier[append] ( identifier[dict] ( identifier[path] = identifier[ca_path] ,
identifier[contents] = identifier[ca_contents] ,
identifier[mode] = identifier[DEFAULT_FILE_MODE] ))
keyword[if] identifier[cert_path] keyword[and] identifier[cert_contents] :
identifier[client] [ literal[string] ]= identifier[cert_path]
identifier[files] . identifier[append] ( identifier[dict] ( identifier[path] = identifier[cert_path] ,
identifier[contents] = identifier[cert_contents] ,
identifier[mode] = identifier[DEFAULT_FILE_MODE] ))
keyword[if] identifier[key_path] keyword[and] identifier[key_contents] :
identifier[client] [ literal[string] ]= identifier[key_path]
identifier[files] . identifier[append] ( identifier[dict] ( identifier[path] = identifier[key_path] ,
identifier[contents] = identifier[key_contents] ,
identifier[mode] = identifier[DEFAULT_FILE_MODE] ,))
keyword[return] identifier[files] | def _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None, cert_contents=None, key_path=None, key_contents=None):
"""
returns a list of NetJSON extra files for automatically generated clients
produces side effects in ``client`` dictionary
"""
files = []
if ca_path and ca_contents:
client['ca'] = ca_path
files.append(dict(path=ca_path, contents=ca_contents, mode=DEFAULT_FILE_MODE)) # depends on [control=['if'], data=[]]
if cert_path and cert_contents:
client['cert'] = cert_path
files.append(dict(path=cert_path, contents=cert_contents, mode=DEFAULT_FILE_MODE)) # depends on [control=['if'], data=[]]
if key_path and key_contents:
client['key'] = key_path
files.append(dict(path=key_path, contents=key_contents, mode=DEFAULT_FILE_MODE)) # depends on [control=['if'], data=[]]
return files |
def derive_readonly(self):
    """
    Build the full list of readonly field names: the statically declared
    ``self.readonly`` entries followed by every field whose config carries
    a truthy ``readonly`` flag.
    """
    configured = [name for name, config in self.field_config.items()
                  if 'readonly' in config and config['readonly']]
    return list(self.readonly) + configured
constant[
Figures out what fields should be readonly. We iterate our field_config to find all
that have a readonly of true
]
variable[readonly] assign[=] call[name[list], parameter[name[self].readonly]]
for taget[tuple[[<ast.Name object at 0x7da1b0f2a470>, <ast.Name object at 0x7da1b0f2a4a0>]]] in starred[call[name[self].field_config.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0f2bc40> begin[:]
call[name[readonly].append, parameter[name[key]]]
return[name[readonly]] | keyword[def] identifier[derive_readonly] ( identifier[self] ):
literal[string]
identifier[readonly] = identifier[list] ( identifier[self] . identifier[readonly] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[field_config] . identifier[items] ():
keyword[if] literal[string] keyword[in] identifier[value] keyword[and] identifier[value] [ literal[string] ]:
identifier[readonly] . identifier[append] ( identifier[key] )
keyword[return] identifier[readonly] | def derive_readonly(self):
"""
Figures out what fields should be readonly. We iterate our field_config to find all
that have a readonly of true
"""
readonly = list(self.readonly)
for (key, value) in self.field_config.items():
if 'readonly' in value and value['readonly']:
readonly.append(key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return readonly |
def p_Class(p):
    '''
    Class : AbstractModifier FinalModifier CLASS INDENTIFIER ExtendsModifier ImplementsModifier COLON Terminator ClassContent
    '''
    # NOTE: the docstring above is the PLY grammar production for this rule
    # and is consumed by the parser generator -- do not reword it.
    # Build the AST node from the matched symbols:
    #   p[1]=AbstractModifier, p[2]=FinalModifier, p[4]=class name,
    #   p[5]=ExtendsModifier, p[6]=ImplementsModifier, p[8]=Terminator,
    #   p[9]=ClassContent; the CLASS keyword (p[3]) and COLON (p[7]) are
    #   punctuation and are dropped.
    p[0] = Class(p[1], p[2], p[4], p[5], p[6], p[8], p[9])
constant[
Class : AbstractModifier FinalModifier CLASS INDENTIFIER ExtendsModifier ImplementsModifier COLON Terminator ClassContent
]
call[name[p]][constant[0]] assign[=] call[name[Class], parameter[call[name[p]][constant[1]], call[name[p]][constant[2]], call[name[p]][constant[4]], call[name[p]][constant[5]], call[name[p]][constant[6]], call[name[p]][constant[8]], call[name[p]][constant[9]]]] | keyword[def] identifier[p_Class] ( identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[Class] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]) | def p_Class(p):
"""
Class : AbstractModifier FinalModifier CLASS INDENTIFIER ExtendsModifier ImplementsModifier COLON Terminator ClassContent
"""
p[0] = Class(p[1], p[2], p[4], p[5], p[6], p[8], p[9]) |
def _extract_more_comments(tree):
    """Return a list of MoreComments objects removed from tree.

    Walks the comment forest breadth-first; each extracted MoreComments is
    removed from its parent's replies (or from the top-level tree) and
    pushed onto a heap-ordered result list.
    """
    extracted = []
    pending = [(None, node) for node in tree]
    while pending:
        parent, node = pending.pop(0)
        if isinstance(node, MoreComments):
            heappush(extracted, node)
            # remove from wherever it lives: parent's replies, or the
            # top-level tree when there is no parent
            container = parent.replies if parent else tree
            container.remove(node)
        else:
            pending.extend((node, reply) for reply in node.replies)
    return extracted
constant[Return a list of MoreComments objects removed from tree.]
variable[more_comments] assign[=] list[[]]
variable[queue] assign[=] <ast.ListComp object at 0x7da20cabd0c0>
while compare[call[name[len], parameter[name[queue]]] greater[>] constant[0]] begin[:]
<ast.Tuple object at 0x7da20cabece0> assign[=] call[name[queue].pop, parameter[constant[0]]]
if call[name[isinstance], parameter[name[comm], name[MoreComments]]] begin[:]
call[name[heappush], parameter[name[more_comments], name[comm]]]
if name[parent] begin[:]
call[name[parent].replies.remove, parameter[name[comm]]]
return[name[more_comments]] | keyword[def] identifier[_extract_more_comments] ( identifier[tree] ):
literal[string]
identifier[more_comments] =[]
identifier[queue] =[( keyword[None] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[tree] ]
keyword[while] identifier[len] ( identifier[queue] )> literal[int] :
identifier[parent] , identifier[comm] = identifier[queue] . identifier[pop] ( literal[int] )
keyword[if] identifier[isinstance] ( identifier[comm] , identifier[MoreComments] ):
identifier[heappush] ( identifier[more_comments] , identifier[comm] )
keyword[if] identifier[parent] :
identifier[parent] . identifier[replies] . identifier[remove] ( identifier[comm] )
keyword[else] :
identifier[tree] . identifier[remove] ( identifier[comm] )
keyword[else] :
keyword[for] identifier[item] keyword[in] identifier[comm] . identifier[replies] :
identifier[queue] . identifier[append] (( identifier[comm] , identifier[item] ))
keyword[return] identifier[more_comments] | def _extract_more_comments(tree):
"""Return a list of MoreComments objects removed from tree."""
more_comments = []
queue = [(None, x) for x in tree]
while len(queue) > 0:
(parent, comm) = queue.pop(0)
if isinstance(comm, MoreComments):
heappush(more_comments, comm)
if parent:
parent.replies.remove(comm) # depends on [control=['if'], data=[]]
else:
tree.remove(comm) # depends on [control=['if'], data=[]]
else:
for item in comm.replies:
queue.append((comm, item)) # depends on [control=['for'], data=['item']] # depends on [control=['while'], data=[]]
return more_comments |
def accel_zoom_out(self, *args):
    """Callback to zoom out: shrink the font of every terminal in the
    notebook. Always returns True (accelerator handled).
    """
    notebook = self.get_notebook()
    for terminal in notebook.iter_terminals():
        terminal.decrease_font_size()
    return True
constant[Callback to zoom out.
]
for taget[name[term]] in starred[call[call[name[self].get_notebook, parameter[]].iter_terminals, parameter[]]] begin[:]
call[name[term].decrease_font_size, parameter[]]
return[constant[True]] | keyword[def] identifier[accel_zoom_out] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[for] identifier[term] keyword[in] identifier[self] . identifier[get_notebook] (). identifier[iter_terminals] ():
identifier[term] . identifier[decrease_font_size] ()
keyword[return] keyword[True] | def accel_zoom_out(self, *args):
"""Callback to zoom out.
"""
for term in self.get_notebook().iter_terminals():
term.decrease_font_size() # depends on [control=['for'], data=['term']]
return True |
def write_system_config(base_url, datadir, tooldir):
    """Write a bcbio_system.yaml configuration file with tool information.

    Downloads the template from ``base_url`` and rewrites it line by line:
    java program ``dir:`` entries under the ``resources`` section are
    repointed into the install tree, and ``galaxy`` lines are commented out.

    :param base_url: URL of the template bcbio_system.yaml
    :param datadir: base data directory; output goes to <datadir>/galaxy/
    :param tooldir: tool install directory, or None to keep an existing
        config untouched
    :return: path to the written (or pre-existing) configuration file
    """
    out_file = os.path.join(datadir, "galaxy", os.path.basename(base_url))
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))
    if os.path.exists(out_file):
        # if no tool directory and exists, do not overwrite
        if tooldir is None:
            return out_file
        else:
            # BUGFIX: was "%Y%M%d_%H%M" -- %M is minute; %m (month) intended
            bak_file = out_file + ".bak%s" % (datetime.datetime.now().strftime("%Y%m%d_%H%M"))
            shutil.copy(out_file, bak_file)
    if tooldir:
        java_basedir = os.path.join(tooldir, "share", "java")
    rewrite_ignore = ("log",)
    with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
        with open(out_file, "w") as out_handle:
            in_resources = False  # currently inside the top-level "resources" section
            in_prog = None        # name of the program whose sub-keys we are reading
            for line in (l.decode("utf-8") for l in in_handle):
                if line[0] != " ":
                    # top-level key: track whether we entered "resources"
                    in_resources = line.startswith("resources")
                    in_prog = None
                elif (in_resources and line[:2] == "  " and line[2] != " "
                      and not line.strip().startswith(rewrite_ignore)):
                    # second-level key under resources: a program name
                    in_prog = line.split(":")[0].strip()
                # Update java directories to point to install directory, avoid special cases
                elif line.strip().startswith("dir:") and in_prog and in_prog not in ["log", "tmp"]:
                    final_dir = os.path.basename(line.split()[-1])
                    if tooldir:
                        line = "%s: %s\n" % (line.split(":")[0],
                                             os.path.join(java_basedir, final_dir))
                    in_prog = None
                elif line.startswith("galaxy"):
                    # galaxy configuration is handled elsewhere; comment it out
                    line = "# %s" % line
                out_handle.write(line)
    return out_file
constant[Write a bcbio_system.yaml configuration file with tool information.
]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[datadir], constant[galaxy], call[name[os].path.basename, parameter[name[base_url]]]]]
if <ast.UnaryOp object at 0x7da1b18bee00> begin[:]
call[name[os].makedirs, parameter[call[name[os].path.dirname, parameter[name[out_file]]]]]
if call[name[os].path.exists, parameter[name[out_file]]] begin[:]
if compare[name[tooldir] is constant[None]] begin[:]
return[name[out_file]]
if name[tooldir] begin[:]
variable[java_basedir] assign[=] call[name[os].path.join, parameter[name[tooldir], constant[share], constant[java]]]
variable[rewrite_ignore] assign[=] tuple[[<ast.Constant object at 0x7da1b18bfe80>]]
with call[name[contextlib].closing, parameter[call[name[urllib_request].urlopen, parameter[name[base_url]]]]] begin[:]
with call[name[open], parameter[name[out_file], constant[w]]] begin[:]
variable[in_resources] assign[=] constant[False]
variable[in_prog] assign[=] constant[None]
for taget[name[line]] in starred[<ast.GeneratorExp object at 0x7da1b18bfac0>] begin[:]
if compare[call[name[line]][constant[0]] not_equal[!=] constant[ ]] begin[:]
variable[in_resources] assign[=] call[name[line].startswith, parameter[constant[resources]]]
variable[in_prog] assign[=] constant[None]
call[name[out_handle].write, parameter[name[line]]]
return[name[out_file]] | keyword[def] identifier[write_system_config] ( identifier[base_url] , identifier[datadir] , identifier[tooldir] ):
literal[string]
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[datadir] , literal[string] , identifier[os] . identifier[path] . identifier[basename] ( identifier[base_url] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[out_file] )):
identifier[os] . identifier[makedirs] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[out_file] ))
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[out_file] ):
keyword[if] identifier[tooldir] keyword[is] keyword[None] :
keyword[return] identifier[out_file]
keyword[else] :
identifier[bak_file] = identifier[out_file] + literal[string] %( identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] ))
identifier[shutil] . identifier[copy] ( identifier[out_file] , identifier[bak_file] )
keyword[if] identifier[tooldir] :
identifier[java_basedir] = identifier[os] . identifier[path] . identifier[join] ( identifier[tooldir] , literal[string] , literal[string] )
identifier[rewrite_ignore] =( literal[string] ,)
keyword[with] identifier[contextlib] . identifier[closing] ( identifier[urllib_request] . identifier[urlopen] ( identifier[base_url] )) keyword[as] identifier[in_handle] :
keyword[with] identifier[open] ( identifier[out_file] , literal[string] ) keyword[as] identifier[out_handle] :
identifier[in_resources] = keyword[False]
identifier[in_prog] = keyword[None]
keyword[for] identifier[line] keyword[in] ( identifier[l] . identifier[decode] ( literal[string] ) keyword[for] identifier[l] keyword[in] identifier[in_handle] ):
keyword[if] identifier[line] [ literal[int] ]!= literal[string] :
identifier[in_resources] = identifier[line] . identifier[startswith] ( literal[string] )
identifier[in_prog] = keyword[None]
keyword[elif] ( identifier[in_resources] keyword[and] identifier[line] [: literal[int] ]== literal[string] keyword[and] identifier[line] [ literal[int] ]!= literal[string]
keyword[and] keyword[not] identifier[line] . identifier[strip] (). identifier[startswith] ( identifier[rewrite_ignore] )):
identifier[in_prog] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[elif] identifier[line] . identifier[strip] (). identifier[startswith] ( literal[string] ) keyword[and] identifier[in_prog] keyword[and] identifier[in_prog] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
identifier[final_dir] = identifier[os] . identifier[path] . identifier[basename] ( identifier[line] . identifier[split] ()[- literal[int] ])
keyword[if] identifier[tooldir] :
identifier[line] = literal[string] %( identifier[line] . identifier[split] ( literal[string] )[ literal[int] ],
identifier[os] . identifier[path] . identifier[join] ( identifier[java_basedir] , identifier[final_dir] ))
identifier[in_prog] = keyword[None]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[line] = literal[string] % identifier[line]
identifier[out_handle] . identifier[write] ( identifier[line] )
keyword[return] identifier[out_file] | def write_system_config(base_url, datadir, tooldir):
"""Write a bcbio_system.yaml configuration file with tool information.
"""
out_file = os.path.join(datadir, 'galaxy', os.path.basename(base_url))
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file)) # depends on [control=['if'], data=[]]
if os.path.exists(out_file):
# if no tool directory and exists, do not overwrite
if tooldir is None:
return out_file # depends on [control=['if'], data=[]]
else:
bak_file = out_file + '.bak%s' % datetime.datetime.now().strftime('%Y%M%d_%H%M')
shutil.copy(out_file, bak_file) # depends on [control=['if'], data=[]]
if tooldir:
java_basedir = os.path.join(tooldir, 'share', 'java') # depends on [control=['if'], data=[]]
rewrite_ignore = ('log',)
with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
with open(out_file, 'w') as out_handle:
in_resources = False
in_prog = None
for line in (l.decode('utf-8') for l in in_handle):
if line[0] != ' ':
in_resources = line.startswith('resources')
in_prog = None # depends on [control=['if'], data=[]]
elif in_resources and line[:2] == ' ' and (line[2] != ' ') and (not line.strip().startswith(rewrite_ignore)):
in_prog = line.split(':')[0].strip() # depends on [control=['if'], data=[]]
# Update java directories to point to install directory, avoid special cases
elif line.strip().startswith('dir:') and in_prog and (in_prog not in ['log', 'tmp']):
final_dir = os.path.basename(line.split()[-1])
if tooldir:
line = '%s: %s\n' % (line.split(':')[0], os.path.join(java_basedir, final_dir)) # depends on [control=['if'], data=[]]
in_prog = None # depends on [control=['if'], data=[]]
elif line.startswith('galaxy'):
line = '# %s' % line # depends on [control=['if'], data=[]]
out_handle.write(line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['out_handle']] # depends on [control=['with'], data=['in_handle']]
return out_file |
def _dump_qmc(self):
    """
    Makes a reduced array that excludes quartets with no information and
    prints the quartets and weights to a file formatted for wQMC

    Reads quartets in chunks of ``self._chunksize`` from the HDF5 results
    database and writes one "a,b|c,d" line per quartet to
    ``<dirs>/<name>.quartets.txt`` (path recorded in ``self.files.qdump``).

    NOTE(review): the zero-weight masking code below is commented out, so
    currently *all* quartets are written and the docstring's "excludes
    quartets with no information" overstates what happens -- confirm
    whether masking should be re-enabled.
    """
    ## open the h5 database
    io5 = h5py.File(self.database.output, 'r')
    ## create an output file for writing
    self.files.qdump = os.path.join(self.dirs, self.name+".quartets.txt")
    LOGGER.info("qdump file %s", self.files.qdump)
    outfile = open(self.files.qdump, 'w')
    ## todo: should pull quarts order in randomly? or doesn't matter?
    ## iterate the quartets dataset in _chunksize slices to bound memory use
    for idx in xrange(0, self.params.nquartets, self._chunksize):
        ## get mask of zero weight quartets
        #mask = io5["weights"][idx:idx+self.chunksize] != 0
        #weight = io5["weights"][idx:idx+self.chunksize][mask]
        #LOGGER.info("exluded = %s, mask shape %s",
        #            self._chunksize - mask.shape[0], mask.shape)
        #LOGGER.info('q shape %s', io5["quartets"][idx:idx+self._chunksize].shape)
        masked_quartets = io5["quartets"][idx:idx+self._chunksize, :]#[mask, :]
        quarts = [list(j) for j in masked_quartets]
        ## format and print: one "a,b|c,d" quartet per line (wQMC input)
        #chunk = ["{},{}|{},{}:{}".format(*i+[j]) for i, j \
        #         in zip(quarts, weight)]
        chunk = ["{},{}|{},{}".format(*i) for i in quarts]
        outfile.write("\n".join(chunk)+"\n")
    ## close output file and h5 database
    outfile.close()
    io5.close()
constant[
Makes a reduced array that excludes quartets with no information and
prints the quartets and weights to a file formatted for wQMC
]
variable[io5] assign[=] call[name[h5py].File, parameter[name[self].database.output, constant[r]]]
name[self].files.qdump assign[=] call[name[os].path.join, parameter[name[self].dirs, binary_operation[name[self].name + constant[.quartets.txt]]]]
call[name[LOGGER].info, parameter[constant[qdump file %s], name[self].files.qdump]]
variable[outfile] assign[=] call[name[open], parameter[name[self].files.qdump, constant[w]]]
for taget[name[idx]] in starred[call[name[xrange], parameter[constant[0], name[self].params.nquartets, name[self]._chunksize]]] begin[:]
variable[masked_quartets] assign[=] call[call[name[io5]][constant[quartets]]][tuple[[<ast.Slice object at 0x7da207f032e0>, <ast.Slice object at 0x7da207f00a60>]]]
variable[quarts] assign[=] <ast.ListComp object at 0x7da207f036d0>
variable[chunk] assign[=] <ast.ListComp object at 0x7da207f02d10>
call[name[outfile].write, parameter[binary_operation[call[constant[
].join, parameter[name[chunk]]] + constant[
]]]]
call[name[outfile].close, parameter[]]
call[name[io5].close, parameter[]] | keyword[def] identifier[_dump_qmc] ( identifier[self] ):
literal[string]
identifier[io5] = identifier[h5py] . identifier[File] ( identifier[self] . identifier[database] . identifier[output] , literal[string] )
identifier[self] . identifier[files] . identifier[qdump] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[dirs] , identifier[self] . identifier[name] + literal[string] )
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[self] . identifier[files] . identifier[qdump] )
identifier[outfile] = identifier[open] ( identifier[self] . identifier[files] . identifier[qdump] , literal[string] )
keyword[for] identifier[idx] keyword[in] identifier[xrange] ( literal[int] , identifier[self] . identifier[params] . identifier[nquartets] , identifier[self] . identifier[_chunksize] ):
identifier[masked_quartets] = identifier[io5] [ literal[string] ][ identifier[idx] : identifier[idx] + identifier[self] . identifier[_chunksize] ,:]
identifier[quarts] =[ identifier[list] ( identifier[j] ) keyword[for] identifier[j] keyword[in] identifier[masked_quartets] ]
identifier[chunk] =[ literal[string] . identifier[format] (* identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[quarts] ]
identifier[outfile] . identifier[write] ( literal[string] . identifier[join] ( identifier[chunk] )+ literal[string] )
identifier[outfile] . identifier[close] ()
identifier[io5] . identifier[close] () | def _dump_qmc(self):
"""
Makes a reduced array that excludes quartets with no information and
prints the quartets and weights to a file formatted for wQMC
"""
## open the h5 database
io5 = h5py.File(self.database.output, 'r')
## create an output file for writing
self.files.qdump = os.path.join(self.dirs, self.name + '.quartets.txt')
LOGGER.info('qdump file %s', self.files.qdump)
outfile = open(self.files.qdump, 'w')
## todo: should pull quarts order in randomly? or doesn't matter?
for idx in xrange(0, self.params.nquartets, self._chunksize):
## get mask of zero weight quartets
#mask = io5["weights"][idx:idx+self.chunksize] != 0
#weight = io5["weights"][idx:idx+self.chunksize][mask]
#LOGGER.info("exluded = %s, mask shape %s",
# self._chunksize - mask.shape[0], mask.shape)
#LOGGER.info('q shape %s', io5["quartets"][idx:idx+self._chunksize].shape)
masked_quartets = io5['quartets'][idx:idx + self._chunksize, :] #[mask, :]
quarts = [list(j) for j in masked_quartets]
## format and print
#chunk = ["{},{}|{},{}:{}".format(*i+[j]) for i, j \
# in zip(quarts, weight)]
chunk = ['{},{}|{},{}'.format(*i) for i in quarts]
outfile.write('\n'.join(chunk) + '\n') # depends on [control=['for'], data=['idx']]
## close output file and h5 database
outfile.close()
io5.close() |
def flavor_access_add(self, flavor_id, project_id):
    """Add a project to the flavor access list.

    :param flavor_id: ID of the flavor whose access list is extended
    :param project_id: ID of the project (tenant) being granted access
    :return: dict mapping ``flavor_id`` to the list of tenant IDs that
        now appear on the flavor's access list
    """
    accesses = self.compute_conn.flavor_access.add_tenant_access(
        flavor_id, project_id)
    # The API returns FlavorAccess records; expose just their tenant ids.
    return {flavor_id: [access.tenant_id for access in accesses]}
constant[
Add a project to the flavor access list
]
variable[nt_ks] assign[=] name[self].compute_conn
variable[ret] assign[=] dictionary[[<ast.Name object at 0x7da1b21ee200>], [<ast.List object at 0x7da1b21edff0>]]
variable[flavor_accesses] assign[=] call[name[nt_ks].flavor_access.add_tenant_access, parameter[name[flavor_id], name[project_id]]]
for taget[name[project]] in starred[name[flavor_accesses]] begin[:]
call[call[name[ret]][name[flavor_id]].append, parameter[name[project].tenant_id]]
return[name[ret]] | keyword[def] identifier[flavor_access_add] ( identifier[self] , identifier[flavor_id] , identifier[project_id] ):
literal[string]
identifier[nt_ks] = identifier[self] . identifier[compute_conn]
identifier[ret] ={ identifier[flavor_id] :[]}
identifier[flavor_accesses] = identifier[nt_ks] . identifier[flavor_access] . identifier[add_tenant_access] ( identifier[flavor_id] , identifier[project_id] )
keyword[for] identifier[project] keyword[in] identifier[flavor_accesses] :
identifier[ret] [ identifier[flavor_id] ]. identifier[append] ( identifier[project] . identifier[tenant_id] )
keyword[return] identifier[ret] | def flavor_access_add(self, flavor_id, project_id):
"""
Add a project to the flavor access list
"""
nt_ks = self.compute_conn
ret = {flavor_id: []}
flavor_accesses = nt_ks.flavor_access.add_tenant_access(flavor_id, project_id)
for project in flavor_accesses:
ret[flavor_id].append(project.tenant_id) # depends on [control=['for'], data=['project']]
return ret |
def add_alert_tag(self, id, tag_value, **kwargs):  # noqa: E501
    """Add a tag to a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.add_alert_tag(id, tag_value, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param str tag_value: (required)
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and the async_req path delegate to the
    # *_with_http_info variant; only the meaning of the returned value
    # differs (request thread vs. response data), so a single return works.
    return self.add_alert_tag_with_http_info(id, tag_value, **kwargs)  # noqa: E501
constant[Add a tag to a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_alert_tag(id, tag_value, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str tag_value: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].add_alert_tag_with_http_info, parameter[name[id], name[tag_value]]]] | keyword[def] identifier[add_alert_tag] ( identifier[self] , identifier[id] , identifier[tag_value] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[add_alert_tag_with_http_info] ( identifier[id] , identifier[tag_value] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[add_alert_tag_with_http_info] ( identifier[id] , identifier[tag_value] ,** identifier[kwargs] )
keyword[return] identifier[data] | def add_alert_tag(self, id, tag_value, **kwargs): # noqa: E501
'Add a tag to a specific alert # noqa: E501\n\n # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.add_alert_tag(id, tag_value, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str id: (required)\n :param str tag_value: (required)\n :return: ResponseContainer\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_alert_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.add_alert_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501
return data |
def _get_loader(config):
"""Determine which config file type and loader to use based on a filename.
:param config str: filename to config file
:return: a tuple of the loader type and callable to load
:rtype: (str, Callable)
"""
if config.endswith('.yml') or config.endswith('.yaml'):
if not yaml:
LOGGER.error("pyyaml must be installed to use the YAML loader")
# TODO: stop reactor if running
return None, None
return 'yaml', yaml.load
else:
return 'json', json.loads | def function[_get_loader, parameter[config]]:
constant[Determine which config file type and loader to use based on a filename.
:param config str: filename to config file
:return: a tuple of the loader type and callable to load
:rtype: (str, Callable)
]
if <ast.BoolOp object at 0x7da1b0efa470> begin[:]
if <ast.UnaryOp object at 0x7da1b0ef9840> begin[:]
call[name[LOGGER].error, parameter[constant[pyyaml must be installed to use the YAML loader]]]
return[tuple[[<ast.Constant object at 0x7da1b0efb250>, <ast.Constant object at 0x7da1b0efbdf0>]]]
return[tuple[[<ast.Constant object at 0x7da1b0ef98d0>, <ast.Attribute object at 0x7da1b0efb850>]]] | keyword[def] identifier[_get_loader] ( identifier[config] ):
literal[string]
keyword[if] identifier[config] . identifier[endswith] ( literal[string] ) keyword[or] identifier[config] . identifier[endswith] ( literal[string] ):
keyword[if] keyword[not] identifier[yaml] :
identifier[LOGGER] . identifier[error] ( literal[string] )
keyword[return] keyword[None] , keyword[None]
keyword[return] literal[string] , identifier[yaml] . identifier[load]
keyword[else] :
keyword[return] literal[string] , identifier[json] . identifier[loads] | def _get_loader(config):
"""Determine which config file type and loader to use based on a filename.
:param config str: filename to config file
:return: a tuple of the loader type and callable to load
:rtype: (str, Callable)
"""
if config.endswith('.yml') or config.endswith('.yaml'):
if not yaml:
LOGGER.error('pyyaml must be installed to use the YAML loader')
# TODO: stop reactor if running
return (None, None) # depends on [control=['if'], data=[]]
return ('yaml', yaml.load) # depends on [control=['if'], data=[]]
else:
return ('json', json.loads) |
def decode_ber(ber):
    """
    Decodes a ber length byte array into an integer

    return: (length, bytes_read) - a tuple of values
    """
    data = bytearray(ber)
    first = data[0]
    # Short form: high bit clear, the byte itself is the length.
    if first <= 127:
        return first, 1
    # Long form: the low 7 bits give the count of subsequent length octets,
    # which encode the length big-endian.
    count = 1 + (first & 0x7F)
    value = 0
    for idx in range(1, count):
        value = (value << 8) | data[idx]
    return value, count
constant[
Decodes a ber length byte array into an integer
return: (length, bytes_read) - a tuple of values
]
variable[ber] assign[=] call[name[bytearray], parameter[name[ber]]]
variable[length] assign[=] call[name[ber]][constant[0]]
variable[bytes_read] assign[=] constant[1]
if compare[name[length] greater[>] constant[127]] begin[:]
<ast.AugAssign object at 0x7da204567940>
variable[length] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[bytes_read]]]] begin[:]
<ast.AugAssign object at 0x7da204567ee0>
return[tuple[[<ast.Name object at 0x7da204565870>, <ast.Name object at 0x7da2045679d0>]]] | keyword[def] identifier[decode_ber] ( identifier[ber] ):
literal[string]
identifier[ber] = identifier[bytearray] ( identifier[ber] )
identifier[length] = identifier[ber] [ literal[int] ]
identifier[bytes_read] = literal[int]
keyword[if] identifier[length] > literal[int] :
identifier[bytes_read] += identifier[length] & literal[int]
identifier[length] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[bytes_read] ):
identifier[length] += identifier[ber] [ identifier[i] ]<<( literal[int] *( identifier[bytes_read] - identifier[i] - literal[int] ))
keyword[return] identifier[length] , identifier[bytes_read] | def decode_ber(ber):
"""
Decodes a ber length byte array into an integer
return: (length, bytes_read) - a tuple of values
"""
ber = bytearray(ber)
length = ber[0]
bytes_read = 1
if length > 127:
bytes_read += length & 127 # Strip off the high bit
length = 0
for i in range(1, bytes_read):
length += ber[i] << 8 * (bytes_read - i - 1) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['length']]
return (length, bytes_read) |
def ncbi_geneid(self):
    """
    Retrieve this feature's NCBI GeneID if it's present.

    NCBI GFF3 files contain gene IDs encoded in **Dbxref** attributes
    (example: `Dbxref=GeneID:103504972`). This function locates and returns
    the GeneID if present, or returns `None` otherwise.
    """
    values = self.get_attribute('Dbxref', as_list=True)
    if values is None:
        return None
    for value in values:
        if value.startswith('GeneID:'):
            # partition() returns everything after the first ':', so an ID
            # containing additional colons no longer crashes the 2-way
            # unpack that `key, geneid = value.split(':')` performed.
            return value.partition(':')[2]
    return None
constant[
Retrieve this feature's NCBI GeneID if it's present.
NCBI GFF3 files contain gene IDs encoded in **Dbxref** attributes
(example: `Dbxref=GeneID:103504972`). This function locates and returns
the GeneID if present, or returns `None` otherwise.
]
variable[values] assign[=] call[name[self].get_attribute, parameter[constant[Dbxref]]]
if compare[name[values] is constant[None]] begin[:]
return[constant[None]]
for taget[name[value]] in starred[name[values]] begin[:]
if call[name[value].startswith, parameter[constant[GeneID:]]] begin[:]
<ast.Tuple object at 0x7da18dc99f90> assign[=] call[name[value].split, parameter[constant[:]]]
return[name[geneid]]
return[constant[None]] | keyword[def] identifier[ncbi_geneid] ( identifier[self] ):
literal[string]
identifier[values] = identifier[self] . identifier[get_attribute] ( literal[string] , identifier[as_list] = keyword[True] )
keyword[if] identifier[values] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[for] identifier[value] keyword[in] identifier[values] :
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ):
identifier[key] , identifier[geneid] = identifier[value] . identifier[split] ( literal[string] )
keyword[return] identifier[geneid]
keyword[return] keyword[None] | def ncbi_geneid(self):
"""
Retrieve this feature's NCBI GeneID if it's present.
NCBI GFF3 files contain gene IDs encoded in **Dbxref** attributes
(example: `Dbxref=GeneID:103504972`). This function locates and returns
the GeneID if present, or returns `None` otherwise.
"""
values = self.get_attribute('Dbxref', as_list=True)
if values is None:
return None # depends on [control=['if'], data=[]]
for value in values:
if value.startswith('GeneID:'):
(key, geneid) = value.split(':')
return geneid # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['value']]
return None |
def get_submissions_multiple_assignments_by_sis_id(
        self, is_section, sis_id, students=None, assignments=None,
        **params):
    """
    List submissions for multiple assignments by course/section sis id and
    optionally student

    https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students
    """
    # The two branches differ only in the namespace handed to _sis_id,
    # so resolve that first and delegate once.
    sis_scope = 'section' if is_section else 'course'
    return self.get_submissions_multiple_assignments(
        is_section, self._sis_id(sis_id, sis_scope), students,
        assignments, **params)
constant[
List submissions for multiple assignments by course/section sis id and
optionally student
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students
]
if name[is_section] begin[:]
return[call[name[self].get_submissions_multiple_assignments, parameter[name[is_section], call[name[self]._sis_id, parameter[name[sis_id], constant[section]]], name[students], name[assignments]]]] | keyword[def] identifier[get_submissions_multiple_assignments_by_sis_id] (
identifier[self] , identifier[is_section] , identifier[sis_id] , identifier[students] = keyword[None] , identifier[assignments] = keyword[None] ,
** identifier[params] ):
literal[string]
keyword[if] identifier[is_section] :
keyword[return] identifier[self] . identifier[get_submissions_multiple_assignments] (
identifier[is_section] , identifier[self] . identifier[_sis_id] ( identifier[sis_id] , literal[string] ), identifier[students] ,
identifier[assignments] ,** identifier[params] )
keyword[else] :
keyword[return] identifier[self] . identifier[get_submissions_multiple_assignments] (
identifier[is_section] , identifier[self] . identifier[_sis_id] ( identifier[sis_id] , literal[string] ), identifier[students] ,
identifier[assignments] ,** identifier[params] ) | def get_submissions_multiple_assignments_by_sis_id(self, is_section, sis_id, students=None, assignments=None, **params):
"""
List submissions for multiple assignments by course/section sis id and
optionally student
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students
"""
if is_section:
return self.get_submissions_multiple_assignments(is_section, self._sis_id(sis_id, 'section'), students, assignments, **params) # depends on [control=['if'], data=[]]
else:
return self.get_submissions_multiple_assignments(is_section, self._sis_id(sis_id, 'course'), students, assignments, **params) |
def write_equalwidth_bedfile(bedfile, width, outfile):
    """Read input from <bedfile>, set the width of all entries to <width> and
    write the result to <outfile>.

    Input file needs to be in BED or WIG format.

    :param bedfile: path to the input BED/WIG file
    :param width: target width (bp) for every output interval
    :param outfile: path the resized intervals are written to
    """
    BUFSIZE = 10000
    # `with` guarantees both handles are closed even on the sys.exit(1)
    # error path (SystemExit) -- the originals leaked on failure.
    with open(bedfile) as f, open(outfile, "w") as out:
        lines = f.readlines(BUFSIZE)
        line_count = 0
        while lines:
            for line in lines:
                line_count += 1
                # Skip comment, track and browser definition lines.
                if not line.startswith(("#", "track", "browser")):
                    vals = line.strip().split("\t")
                    try:
                        start, end = int(vals[1]), int(vals[2])
                    except (ValueError, IndexError):
                        # IndexError added: lines with fewer than 3 fields
                        # (e.g. blank lines) are a format error too.
                        print("Error on line %s while reading %s. Is the file in BED or WIG format?" % (line_count, bedfile))
                        sys.exit(1)
                    # Recenter the interval on its midpoint at the requested width.
                    start = (start + end) // 2 - (width // 2)
                    # This shifts the center, but ensures the width is identical... maybe not ideal
                    if start < 0:
                        start = 0
                    end = start + width
                    # Keep all the other information in the bedfile if it's there
                    if len(vals) > 3:
                        out.write("%s\t%s\t%s\t%s\n" % (vals[0], start, end, "\t".join(vals[3:])))
                    else:
                        out.write("%s\t%s\t%s\n" % (vals[0], start, end))
            lines = f.readlines(BUFSIZE)
constant[Read input from <bedfile>, set the width of all entries to <width> and
write the result to <outfile>.
Input file needs to be in BED or WIG format.]
variable[BUFSIZE] assign[=] constant[10000]
variable[f] assign[=] call[name[open], parameter[name[bedfile]]]
variable[out] assign[=] call[name[open], parameter[name[outfile], constant[w]]]
variable[lines] assign[=] call[name[f].readlines, parameter[name[BUFSIZE]]]
variable[line_count] assign[=] constant[0]
while name[lines] begin[:]
for taget[name[line]] in starred[name[lines]] begin[:]
<ast.AugAssign object at 0x7da1b10df370>
if <ast.BoolOp object at 0x7da1b10ded10> begin[:]
variable[vals] assign[=] call[call[name[line].strip, parameter[]].split, parameter[constant[ ]]]
<ast.Try object at 0x7da204344e20>
variable[start] assign[=] binary_operation[binary_operation[binary_operation[name[start] + name[end]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] - binary_operation[name[width] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]
if compare[name[start] less[<] constant[0]] begin[:]
variable[start] assign[=] constant[0]
variable[end] assign[=] binary_operation[name[start] + name[width]]
if compare[call[name[len], parameter[name[vals]]] greater[>] constant[3]] begin[:]
call[name[out].write, parameter[binary_operation[constant[%s %s %s %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18f09cb50>, <ast.Name object at 0x7da18f09e2c0>, <ast.Name object at 0x7da18f09d300>, <ast.Call object at 0x7da18f09ef50>]]]]]
variable[lines] assign[=] call[name[f].readlines, parameter[name[BUFSIZE]]]
call[name[out].close, parameter[]]
call[name[f].close, parameter[]] | keyword[def] identifier[write_equalwidth_bedfile] ( identifier[bedfile] , identifier[width] , identifier[outfile] ):
literal[string]
identifier[BUFSIZE] = literal[int]
identifier[f] = identifier[open] ( identifier[bedfile] )
identifier[out] = identifier[open] ( identifier[outfile] , literal[string] )
identifier[lines] = identifier[f] . identifier[readlines] ( identifier[BUFSIZE] )
identifier[line_count] = literal[int]
keyword[while] identifier[lines] :
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[line_count] += literal[int]
keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[vals] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )
keyword[try] :
identifier[start] , identifier[end] = identifier[int] ( identifier[vals] [ literal[int] ]), identifier[int] ( identifier[vals] [ literal[int] ])
keyword[except] identifier[ValueError] :
identifier[print] ( literal[string] %( identifier[line_count] , identifier[bedfile] ))
identifier[sys] . identifier[exit] ( literal[int] )
identifier[start] =( identifier[start] + identifier[end] )// literal[int] -( identifier[width] // literal[int] )
keyword[if] identifier[start] < literal[int] :
identifier[start] = literal[int]
identifier[end] = identifier[start] + identifier[width]
keyword[if] identifier[len] ( identifier[vals] )> literal[int] :
identifier[out] . identifier[write] ( literal[string] %( identifier[vals] [ literal[int] ], identifier[start] , identifier[end] , literal[string] . identifier[join] ( identifier[vals] [ literal[int] :])))
keyword[else] :
identifier[out] . identifier[write] ( literal[string] %( identifier[vals] [ literal[int] ], identifier[start] , identifier[end] ))
identifier[lines] = identifier[f] . identifier[readlines] ( identifier[BUFSIZE] )
identifier[out] . identifier[close] ()
identifier[f] . identifier[close] () | def write_equalwidth_bedfile(bedfile, width, outfile):
"""Read input from <bedfile>, set the width of all entries to <width> and
write the result to <outfile>.
Input file needs to be in BED or WIG format."""
BUFSIZE = 10000
f = open(bedfile)
out = open(outfile, 'w')
lines = f.readlines(BUFSIZE)
line_count = 0
while lines:
for line in lines:
line_count += 1
if not line.startswith('#') and (not line.startswith('track')) and (not line.startswith('browser')):
vals = line.strip().split('\t')
try:
(start, end) = (int(vals[1]), int(vals[2])) # depends on [control=['try'], data=[]]
except ValueError:
print('Error on line %s while reading %s. Is the file in BED or WIG format?' % (line_count, bedfile))
sys.exit(1) # depends on [control=['except'], data=[]]
start = (start + end) // 2 - width // 2
# This shifts the center, but ensures the width is identical... maybe not ideal
if start < 0:
start = 0 # depends on [control=['if'], data=['start']]
end = start + width
# Keep all the other information in the bedfile if it's there
if len(vals) > 3:
out.write('%s\t%s\t%s\t%s\n' % (vals[0], start, end, '\t'.join(vals[3:]))) # depends on [control=['if'], data=[]]
else:
out.write('%s\t%s\t%s\n' % (vals[0], start, end)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
lines = f.readlines(BUFSIZE) # depends on [control=['while'], data=[]]
out.close()
f.close() |
def create_widget(self):
    """ Create the underlying widget.
    """
    declaration = self.declaration
    # Fall back to the platform seekBarStyle when the declaration
    # does not specify one.
    style = declaration.style or '@attr/seekBarStyle'
    self.widget = SeekBar(self.get_context(), None, style)
constant[ Create the underlying widget.
]
variable[d] assign[=] name[self].declaration
name[self].widget assign[=] call[name[SeekBar], parameter[call[name[self].get_context, parameter[]], constant[None], <ast.BoolOp object at 0x7da1b1b7c820>]] | keyword[def] identifier[create_widget] ( identifier[self] ):
literal[string]
identifier[d] = identifier[self] . identifier[declaration]
identifier[self] . identifier[widget] = identifier[SeekBar] ( identifier[self] . identifier[get_context] (), keyword[None] ,
identifier[d] . identifier[style] keyword[or] literal[string] ) | def create_widget(self):
""" Create the underlying widget.
"""
d = self.declaration
self.widget = SeekBar(self.get_context(), None, d.style or '@attr/seekBarStyle') |
def _get_handler(self, handler_class):
"""Return an existing class of handler."""
element = None
for handler in self.handlers:
if isinstance(handler, handler_class):
element = handler
break
return element | def function[_get_handler, parameter[self, handler_class]]:
constant[Return an existing class of handler.]
variable[element] assign[=] constant[None]
for taget[name[handler]] in starred[name[self].handlers] begin[:]
if call[name[isinstance], parameter[name[handler], name[handler_class]]] begin[:]
variable[element] assign[=] name[handler]
break
return[name[element]] | keyword[def] identifier[_get_handler] ( identifier[self] , identifier[handler_class] ):
literal[string]
identifier[element] = keyword[None]
keyword[for] identifier[handler] keyword[in] identifier[self] . identifier[handlers] :
keyword[if] identifier[isinstance] ( identifier[handler] , identifier[handler_class] ):
identifier[element] = identifier[handler]
keyword[break]
keyword[return] identifier[element] | def _get_handler(self, handler_class):
"""Return an existing class of handler."""
element = None
for handler in self.handlers:
if isinstance(handler, handler_class):
element = handler
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['handler']]
return element |
def is_closed(self):
    """ Check if session was closed. """
    # Treat a session that is still shutting down (CLOSING) as closed too.
    return self.state in (SESSION_STATE.CLOSED, SESSION_STATE.CLOSING)
constant[ Check if session was closed. ]
return[<ast.BoolOp object at 0x7da20c991510>] | keyword[def] identifier[is_closed] ( identifier[self] ):
literal[string]
keyword[return] ( identifier[self] . identifier[state] == identifier[SESSION_STATE] . identifier[CLOSED]
keyword[or] identifier[self] . identifier[state] == identifier[SESSION_STATE] . identifier[CLOSING] ) | def is_closed(self):
""" Check if session was closed. """
return self.state == SESSION_STATE.CLOSED or self.state == SESSION_STATE.CLOSING |
def _copy_database_data_clientside(self, tables, source, destination):
"""Copy the data from a table into another table."""
# Retrieve database rows
rows = self.get_database_rows(tables, source)
# Retrieve database columns
cols = self.get_database_columns(tables, source)
# Validate rows and columns
for r in list(rows.keys()):
assert r in tables
for c in list(cols.keys()):
assert c in tables
# Change database to destination
self.change_db(destination)
# Get insert queries
insert_queries = self._get_insert_commands(rows, cols)
# Execute insert queries
self._execute_insert_commands(insert_queries) | def function[_copy_database_data_clientside, parameter[self, tables, source, destination]]:
constant[Copy the data from a table into another table.]
variable[rows] assign[=] call[name[self].get_database_rows, parameter[name[tables], name[source]]]
variable[cols] assign[=] call[name[self].get_database_columns, parameter[name[tables], name[source]]]
for taget[name[r]] in starred[call[name[list], parameter[call[name[rows].keys, parameter[]]]]] begin[:]
assert[compare[name[r] in name[tables]]]
for taget[name[c]] in starred[call[name[list], parameter[call[name[cols].keys, parameter[]]]]] begin[:]
assert[compare[name[c] in name[tables]]]
call[name[self].change_db, parameter[name[destination]]]
variable[insert_queries] assign[=] call[name[self]._get_insert_commands, parameter[name[rows], name[cols]]]
call[name[self]._execute_insert_commands, parameter[name[insert_queries]]] | keyword[def] identifier[_copy_database_data_clientside] ( identifier[self] , identifier[tables] , identifier[source] , identifier[destination] ):
literal[string]
identifier[rows] = identifier[self] . identifier[get_database_rows] ( identifier[tables] , identifier[source] )
identifier[cols] = identifier[self] . identifier[get_database_columns] ( identifier[tables] , identifier[source] )
keyword[for] identifier[r] keyword[in] identifier[list] ( identifier[rows] . identifier[keys] ()):
keyword[assert] identifier[r] keyword[in] identifier[tables]
keyword[for] identifier[c] keyword[in] identifier[list] ( identifier[cols] . identifier[keys] ()):
keyword[assert] identifier[c] keyword[in] identifier[tables]
identifier[self] . identifier[change_db] ( identifier[destination] )
identifier[insert_queries] = identifier[self] . identifier[_get_insert_commands] ( identifier[rows] , identifier[cols] )
identifier[self] . identifier[_execute_insert_commands] ( identifier[insert_queries] ) | def _copy_database_data_clientside(self, tables, source, destination):
"""Copy the data from a table into another table."""
# Retrieve database rows
rows = self.get_database_rows(tables, source)
# Retrieve database columns
cols = self.get_database_columns(tables, source)
# Validate rows and columns
for r in list(rows.keys()):
assert r in tables # depends on [control=['for'], data=['r']]
for c in list(cols.keys()):
assert c in tables # depends on [control=['for'], data=['c']]
# Change database to destination
self.change_db(destination)
# Get insert queries
insert_queries = self._get_insert_commands(rows, cols)
# Execute insert queries
self._execute_insert_commands(insert_queries) |
def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count,
display_name, func):
"""
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable
"""
# noinspection PyUnresolvedReferences
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id, configuration_name=configuration_name,
node_count=node_count, display_name=display_name)
try:
operation = func(instance) # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result) | def function[_apply_to_instance, parameter[self, project_id, instance_id, configuration_name, node_count, display_name, func]]:
constant[
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable
]
variable[instance] assign[=] call[call[name[self]._get_client, parameter[]].instance, parameter[]]
<ast.Try object at 0x7da1b035e200>
if name[operation] begin[:]
variable[result] assign[=] call[name[operation].result, parameter[]]
call[name[self].log.info, parameter[name[result]]] | keyword[def] identifier[_apply_to_instance] ( identifier[self] , identifier[project_id] , identifier[instance_id] , identifier[configuration_name] , identifier[node_count] ,
identifier[display_name] , identifier[func] ):
literal[string]
identifier[instance] = identifier[self] . identifier[_get_client] ( identifier[project_id] = identifier[project_id] ). identifier[instance] (
identifier[instance_id] = identifier[instance_id] , identifier[configuration_name] = identifier[configuration_name] ,
identifier[node_count] = identifier[node_count] , identifier[display_name] = identifier[display_name] )
keyword[try] :
identifier[operation] = identifier[func] ( identifier[instance] )
keyword[except] identifier[GoogleAPICallError] keyword[as] identifier[e] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] , identifier[e] . identifier[message] )
keyword[raise] identifier[e]
keyword[if] identifier[operation] :
identifier[result] = identifier[operation] . identifier[result] ()
identifier[self] . identifier[log] . identifier[info] ( identifier[result] ) | def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count, display_name, func):
"""
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable
"""
# noinspection PyUnresolvedReferences
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id, configuration_name=configuration_name, node_count=node_count, display_name=display_name)
try:
operation = func(instance) # type: Operation # depends on [control=['try'], data=[]]
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e # depends on [control=['except'], data=['e']]
if operation:
result = operation.result()
self.log.info(result) # depends on [control=['if'], data=[]] |
def error(*args):
    """Report an error either on stderr (interactive) or via the GUI."""
    # Without a controlling terminal there is nobody reading stderr, so
    # hand the message to the graphical notifier instead.
    if not sys.stdin.isatty():
        notify_error(*args)
        return
    print('ERROR:', *args, file=sys.stderr)
constant[Display error message via stderr or GUI.]
if call[name[sys].stdin.isatty, parameter[]] begin[:]
call[name[print], parameter[constant[ERROR:], <ast.Starred object at 0x7da1b1341c90>]] | keyword[def] identifier[error] (* identifier[args] ):
literal[string]
keyword[if] identifier[sys] . identifier[stdin] . identifier[isatty] ():
identifier[print] ( literal[string] ,* identifier[args] , identifier[file] = identifier[sys] . identifier[stderr] )
keyword[else] :
identifier[notify_error] (* identifier[args] ) | def error(*args):
"""Display error message via stderr or GUI."""
if sys.stdin.isatty():
print('ERROR:', *args, file=sys.stderr) # depends on [control=['if'], data=[]]
else:
notify_error(*args) |
def find_gui_and_backend(gui=None):
    """Resolve a gui toolkit name to a (gui, matplotlib backend) pair.
    Parameters
    ----------
    gui : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').
    Returns
    -------
    A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
    'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
    """
    import matplotlib

    if gui and gui != 'auto':
        # An explicit gui request maps straight onto its known backend.
        backend = backends[gui]
    else:
        # No explicit request: honour matplotlib's configured backend and
        # derive the matching gui toolkit from it (may be None) so the
        # inputhook can be activated accordingly.
        backend = matplotlib.rcParams['backend']
        gui = backend2gui.get(backend, None)
    return gui, backend
constant[Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
]
import module[matplotlib]
if <ast.BoolOp object at 0x7da20c7cb610> begin[:]
variable[backend] assign[=] call[name[backends]][name[gui]]
return[tuple[[<ast.Name object at 0x7da20c7cae30>, <ast.Name object at 0x7da20c7c9a20>]]] | keyword[def] identifier[find_gui_and_backend] ( identifier[gui] = keyword[None] ):
literal[string]
keyword[import] identifier[matplotlib]
keyword[if] identifier[gui] keyword[and] identifier[gui] != literal[string] :
identifier[backend] = identifier[backends] [ identifier[gui] ]
keyword[else] :
identifier[backend] = identifier[matplotlib] . identifier[rcParams] [ literal[string] ]
identifier[gui] = identifier[backend2gui] . identifier[get] ( identifier[backend] , keyword[None] )
keyword[return] identifier[gui] , identifier[backend] | def find_gui_and_backend(gui=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui] # depends on [control=['if'], data=[]]
else:
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return (gui, backend) |
def __get_settings(self):
    """
    Returns the current search and replace settings.
    :return: Settings.
    :rtype: dict
    """
    # Pair each option key with the checkbox widget that backs it, then
    # read every widget's state in a single pass.
    option_boxes = (("case_sensitive", self.Case_Sensitive_checkBox),
                    ("whole_word", self.Whole_Word_checkBox),
                    ("regular_expressions", self.Regular_Expressions_checkBox),
                    ("backward_search", self.Backward_Search_checkBox),
                    ("wrap_around", self.Wrap_Around_checkBox))
    return {name: box.isChecked() for name, box in option_boxes}
constant[
Returns the current search and replace settings.
:return: Settings.
:rtype: dict
]
return[dictionary[[<ast.Constant object at 0x7da1b09bff10>, <ast.Constant object at 0x7da1b09bee30>, <ast.Constant object at 0x7da1b09bf280>, <ast.Constant object at 0x7da1b09bf670>, <ast.Constant object at 0x7da1b09bc7c0>], [<ast.Call object at 0x7da1b09be650>, <ast.Call object at 0x7da1b09beec0>, <ast.Call object at 0x7da1b09bd9c0>, <ast.Call object at 0x7da1b09bc910>, <ast.Call object at 0x7da1b09bcf70>]]] | keyword[def] identifier[__get_settings] ( identifier[self] ):
literal[string]
keyword[return] { literal[string] : identifier[self] . identifier[Case_Sensitive_checkBox] . identifier[isChecked] (),
literal[string] : identifier[self] . identifier[Whole_Word_checkBox] . identifier[isChecked] (),
literal[string] : identifier[self] . identifier[Regular_Expressions_checkBox] . identifier[isChecked] (),
literal[string] : identifier[self] . identifier[Backward_Search_checkBox] . identifier[isChecked] (),
literal[string] : identifier[self] . identifier[Wrap_Around_checkBox] . identifier[isChecked] ()} | def __get_settings(self):
"""
Returns the current search and replace settings.
:return: Settings.
:rtype: dict
"""
return {'case_sensitive': self.Case_Sensitive_checkBox.isChecked(), 'whole_word': self.Whole_Word_checkBox.isChecked(), 'regular_expressions': self.Regular_Expressions_checkBox.isChecked(), 'backward_search': self.Backward_Search_checkBox.isChecked(), 'wrap_around': self.Wrap_Around_checkBox.isChecked()} |
def make_graph(pkg):
    """Returns a dictionary of information about pkg & its recursive deps.
    Given a string, which can be parsed as a requirement specifier, return a
    dictionary where each key is the name of pkg or one of its recursive
    dependencies, and each value is a dictionary returned by research_package.
    (No, it's not really a graph.)
    """
    ignore = {'argparse', 'pip', 'setuptools', 'wsgiref'}
    requirement = pkg_resources.Requirement.parse(pkg)
    dependencies = {name: {} for name in recursive_dependencies(requirement)
                    if name not in ignore}
    # Version string of every distribution currently installed.
    versions = {dist.key: dist.version for dist in pkg_resources.working_set}
    for name in dependencies:
        if name in versions:
            dependencies[name]['version'] = versions[name]
        else:
            # Not installed: resources for its own deps cannot be computed.
            warnings.warn("{} is not installed so we cannot compute "
                          "resources for its dependencies.".format(name),
                          PackageNotInstalledWarning)
            dependencies[name]['version'] = None
    for name in dependencies:
        dependencies[name].update(
            research_package(name, dependencies[name]['version']))
    return OrderedDict(sorted(dependencies.items()))
constant[Returns a dictionary of information about pkg & its recursive deps.
Given a string, which can be parsed as a requirement specifier, return a
dictionary where each key is the name of pkg or one of its recursive
dependencies, and each value is a dictionary returned by research_package.
(No, it's not really a graph.)
]
variable[ignore] assign[=] list[[<ast.Constant object at 0x7da20e9575b0>, <ast.Constant object at 0x7da20e956200>, <ast.Constant object at 0x7da20e955b70>, <ast.Constant object at 0x7da20e957940>]]
variable[pkg_deps] assign[=] call[name[recursive_dependencies], parameter[call[name[pkg_resources].Requirement.parse, parameter[name[pkg]]]]]
variable[dependencies] assign[=] <ast.DictComp object at 0x7da20e9572e0>
variable[installed_packages] assign[=] name[pkg_resources].working_set
variable[versions] assign[=] <ast.DictComp object at 0x7da20e955c90>
for taget[name[package]] in starred[name[dependencies]] begin[:]
<ast.Try object at 0x7da20e957af0>
for taget[name[package]] in starred[name[dependencies]] begin[:]
variable[package_data] assign[=] call[name[research_package], parameter[name[package], call[call[name[dependencies]][name[package]]][constant[version]]]]
call[call[name[dependencies]][name[package]].update, parameter[name[package_data]]]
return[call[name[OrderedDict], parameter[<ast.ListComp object at 0x7da20cabfc70>]]] | keyword[def] identifier[make_graph] ( identifier[pkg] ):
literal[string]
identifier[ignore] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[pkg_deps] = identifier[recursive_dependencies] ( identifier[pkg_resources] . identifier[Requirement] . identifier[parse] ( identifier[pkg] ))
identifier[dependencies] ={ identifier[key] :{} keyword[for] identifier[key] keyword[in] identifier[pkg_deps] keyword[if] identifier[key] keyword[not] keyword[in] identifier[ignore] }
identifier[installed_packages] = identifier[pkg_resources] . identifier[working_set]
identifier[versions] ={ identifier[package] . identifier[key] : identifier[package] . identifier[version] keyword[for] identifier[package] keyword[in] identifier[installed_packages] }
keyword[for] identifier[package] keyword[in] identifier[dependencies] :
keyword[try] :
identifier[dependencies] [ identifier[package] ][ literal[string] ]= identifier[versions] [ identifier[package] ]
keyword[except] identifier[KeyError] :
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] . identifier[format] ( identifier[package] ),
identifier[PackageNotInstalledWarning] )
identifier[dependencies] [ identifier[package] ][ literal[string] ]= keyword[None]
keyword[for] identifier[package] keyword[in] identifier[dependencies] :
identifier[package_data] = identifier[research_package] ( identifier[package] , identifier[dependencies] [ identifier[package] ][ literal[string] ])
identifier[dependencies] [ identifier[package] ]. identifier[update] ( identifier[package_data] )
keyword[return] identifier[OrderedDict] (
[( identifier[package] , identifier[dependencies] [ identifier[package] ]) keyword[for] identifier[package] keyword[in] identifier[sorted] ( identifier[dependencies] . identifier[keys] ())]
) | def make_graph(pkg):
"""Returns a dictionary of information about pkg & its recursive deps.
Given a string, which can be parsed as a requirement specifier, return a
dictionary where each key is the name of pkg or one of its recursive
dependencies, and each value is a dictionary returned by research_package.
(No, it's not really a graph.)
"""
ignore = ['argparse', 'pip', 'setuptools', 'wsgiref']
pkg_deps = recursive_dependencies(pkg_resources.Requirement.parse(pkg))
dependencies = {key: {} for key in pkg_deps if key not in ignore}
installed_packages = pkg_resources.working_set
versions = {package.key: package.version for package in installed_packages}
for package in dependencies:
try:
dependencies[package]['version'] = versions[package] # depends on [control=['try'], data=[]]
except KeyError:
warnings.warn('{} is not installed so we cannot compute resources for its dependencies.'.format(package), PackageNotInstalledWarning)
dependencies[package]['version'] = None # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['package']]
for package in dependencies:
package_data = research_package(package, dependencies[package]['version'])
dependencies[package].update(package_data) # depends on [control=['for'], data=['package']]
return OrderedDict([(package, dependencies[package]) for package in sorted(dependencies.keys())]) |
def unregister_class(alias):
    """
    Opposite of L{register_class}.
    @raise UnknownClassAlias: Unknown alias.
    """
    if alias not in CLASS_CACHE:
        raise UnknownClassAlias('Unknown alias %r' % (alias,))
    entry = CLASS_CACHE[alias]
    # Named aliases are cached under both the alias string and the class
    # object; anonymous ones only under the class itself.
    if not entry.anonymous:
        del CLASS_CACHE[entry.alias]
    del CLASS_CACHE[entry.klass]
    return entry
constant[
Opposite of L{register_class}.
@raise UnknownClassAlias: Unknown alias.
]
<ast.Try object at 0x7da18dc98eb0>
if <ast.UnaryOp object at 0x7da18dc9b700> begin[:]
<ast.Delete object at 0x7da18dc99e10>
<ast.Delete object at 0x7da18dc98e50>
return[name[x]] | keyword[def] identifier[unregister_class] ( identifier[alias] ):
literal[string]
keyword[try] :
identifier[x] = identifier[CLASS_CACHE] [ identifier[alias] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[UnknownClassAlias] ( literal[string] %( identifier[alias] ,))
keyword[if] keyword[not] identifier[x] . identifier[anonymous] :
keyword[del] identifier[CLASS_CACHE] [ identifier[x] . identifier[alias] ]
keyword[del] identifier[CLASS_CACHE] [ identifier[x] . identifier[klass] ]
keyword[return] identifier[x] | def unregister_class(alias):
"""
Opposite of L{register_class}.
@raise UnknownClassAlias: Unknown alias.
"""
try:
x = CLASS_CACHE[alias] # depends on [control=['try'], data=[]]
except KeyError:
raise UnknownClassAlias('Unknown alias %r' % (alias,)) # depends on [control=['except'], data=[]]
if not x.anonymous:
del CLASS_CACHE[x.alias] # depends on [control=['if'], data=[]]
del CLASS_CACHE[x.klass]
return x |
def _set_state_from_serializeable_fields_and_state(self, state, klass):
    """ set only fields from state, which are present in klass.__serialize_fields """
    if _debug:
        logger.debug("restoring state for class %s", klass)
    for name in SerializableMixIn._get_serialize_fields(klass):
        if name not in state:
            if _debug:
                logger.debug("skipped %s, because it is not contained in state", name)
            continue
        # Some attributes may be read-only (e.g. properties); log the
        # culprit with a traceback and keep restoring the rest.
        try:
            setattr(self, name, state.get(name))
        except AttributeError:
            logger.debug('field: %s', name, exc_info=True)
constant[ set only fields from state, which are present in klass.__serialize_fields ]
if name[_debug] begin[:]
call[name[logger].debug, parameter[constant[restoring state for class %s], name[klass]]]
for taget[name[field]] in starred[call[name[SerializableMixIn]._get_serialize_fields, parameter[name[klass]]]] begin[:]
if compare[name[field] in name[state]] begin[:]
<ast.Try object at 0x7da20e9569e0> | keyword[def] identifier[_set_state_from_serializeable_fields_and_state] ( identifier[self] , identifier[state] , identifier[klass] ):
literal[string]
keyword[if] identifier[_debug] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[klass] )
keyword[for] identifier[field] keyword[in] identifier[SerializableMixIn] . identifier[_get_serialize_fields] ( identifier[klass] ):
keyword[if] identifier[field] keyword[in] identifier[state] :
keyword[try] :
identifier[setattr] ( identifier[self] , identifier[field] , identifier[state] . identifier[get] ( identifier[field] ))
keyword[except] identifier[AttributeError] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[field] , identifier[exc_info] = keyword[True] )
keyword[else] :
keyword[if] identifier[_debug] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[field] ) | def _set_state_from_serializeable_fields_and_state(self, state, klass):
""" set only fields from state, which are present in klass.__serialize_fields """
if _debug:
logger.debug('restoring state for class %s', klass) # depends on [control=['if'], data=[]]
for field in SerializableMixIn._get_serialize_fields(klass):
if field in state:
# ensure we can set attributes. Log culprits.
try:
setattr(self, field, state.get(field)) # depends on [control=['try'], data=[]]
except AttributeError:
logger.debug('field: %s', field, exc_info=True) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['field', 'state']]
elif _debug:
logger.debug('skipped %s, because it is not contained in state', field) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']] |
def get_new_configured_app(dispatcher, path=DEFAULT_WEB_PATH):
    """
    Create new :class:`aiohttp.web.Application` and configure it.
    :param dispatcher: Dispatcher instance
    :param path: Path to your webhook.
    :return:
    """
    # Build a bare application, then wire the webhook handler onto it.
    application = web.Application()
    configure_app(dispatcher, application, path)
    return application
constant[
Create new :class:`aiohttp.web.Application` and configure it.
:param dispatcher: Dispatcher instance
:param path: Path to your webhook.
:return:
]
variable[app] assign[=] call[name[web].Application, parameter[]]
call[name[configure_app], parameter[name[dispatcher], name[app], name[path]]]
return[name[app]] | keyword[def] identifier[get_new_configured_app] ( identifier[dispatcher] , identifier[path] = identifier[DEFAULT_WEB_PATH] ):
literal[string]
identifier[app] = identifier[web] . identifier[Application] ()
identifier[configure_app] ( identifier[dispatcher] , identifier[app] , identifier[path] )
keyword[return] identifier[app] | def get_new_configured_app(dispatcher, path=DEFAULT_WEB_PATH):
"""
Create new :class:`aiohttp.web.Application` and configure it.
:param dispatcher: Dispatcher instance
:param path: Path to your webhook.
:return:
"""
app = web.Application()
configure_app(dispatcher, app, path)
return app |
def cmd(self):
    """Returns the (last) saved command line.
    If the file was created from a run that resumed from a checkpoint, only
    the last command line used is returned.
    Returns
    -------
    cmd : string
        The command line that created this InferenceFile.
    """
    stored = self.attrs["cmd"]
    # Resumed runs store an array holding the full history of command
    # lines; expose only the most recent entry.
    return stored[-1] if isinstance(stored, numpy.ndarray) else stored
constant[Returns the (last) saved command line.
If the file was created from a run that resumed from a checkpoint, only
the last command line used is returned.
Returns
-------
cmd : string
The command line that created this InferenceFile.
]
variable[cmd] assign[=] call[name[self].attrs][constant[cmd]]
if call[name[isinstance], parameter[name[cmd], name[numpy].ndarray]] begin[:]
variable[cmd] assign[=] call[name[cmd]][<ast.UnaryOp object at 0x7da20c6e7ee0>]
return[name[cmd]] | keyword[def] identifier[cmd] ( identifier[self] ):
literal[string]
identifier[cmd] = identifier[self] . identifier[attrs] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[cmd] , identifier[numpy] . identifier[ndarray] ):
identifier[cmd] = identifier[cmd] [- literal[int] ]
keyword[return] identifier[cmd] | def cmd(self):
"""Returns the (last) saved command line.
If the file was created from a run that resumed from a checkpoint, only
the last command line used is returned.
Returns
-------
cmd : string
The command line that created this InferenceFile.
"""
cmd = self.attrs['cmd']
if isinstance(cmd, numpy.ndarray):
cmd = cmd[-1] # depends on [control=['if'], data=[]]
return cmd |
def to_utc(a_datetime, keep_utc_tzinfo=False):
    """
    Convert a timezone-aware datetime to UTC time.
    :param a_datetime: a timezone aware datetime; naive values are
        returned unchanged.
    :param keep_utc_tzinfo: whether to retain the UTC tzinfo on the result.
    """
    if not a_datetime.tzinfo:
        # A naive datetime carries no zone information, so there is
        # nothing to convert.
        return a_datetime
    converted = a_datetime.astimezone(utc)  # convert to utc time
    if keep_utc_tzinfo is False:
        converted = converted.replace(tzinfo=None)
    return converted
constant[
Convert a time awared datetime to utc datetime.
:param a_datetime: a timezone awared datetime. (If not, then just returns)
:param keep_utc_tzinfo: whether to retain the utc time zone information.
**中文文档**
将一个带时区的时间转化成UTC时间。而对于UTC时间而言, 有没有时区信息都无所谓了。
]
if name[a_datetime].tzinfo begin[:]
variable[utc_datetime] assign[=] call[name[a_datetime].astimezone, parameter[name[utc]]]
if compare[name[keep_utc_tzinfo] is constant[False]] begin[:]
variable[utc_datetime] assign[=] call[name[utc_datetime].replace, parameter[]]
return[name[utc_datetime]] | keyword[def] identifier[to_utc] ( identifier[a_datetime] , identifier[keep_utc_tzinfo] = keyword[False] ):
literal[string]
keyword[if] identifier[a_datetime] . identifier[tzinfo] :
identifier[utc_datetime] = identifier[a_datetime] . identifier[astimezone] ( identifier[utc] )
keyword[if] identifier[keep_utc_tzinfo] keyword[is] keyword[False] :
identifier[utc_datetime] = identifier[utc_datetime] . identifier[replace] ( identifier[tzinfo] = keyword[None] )
keyword[return] identifier[utc_datetime]
keyword[else] :
keyword[return] identifier[a_datetime] | def to_utc(a_datetime, keep_utc_tzinfo=False):
"""
Convert a time awared datetime to utc datetime.
:param a_datetime: a timezone awared datetime. (If not, then just returns)
:param keep_utc_tzinfo: whether to retain the utc time zone information.
**中文文档**
将一个带时区的时间转化成UTC时间。而对于UTC时间而言, 有没有时区信息都无所谓了。
"""
if a_datetime.tzinfo:
utc_datetime = a_datetime.astimezone(utc) # convert to utc time
if keep_utc_tzinfo is False:
utc_datetime = utc_datetime.replace(tzinfo=None) # depends on [control=['if'], data=[]]
return utc_datetime # depends on [control=['if'], data=[]]
else:
return a_datetime |
def get_unf(N=100):
    """
    Generates N uniformly distributed directions
    using the way described in Fisher et al. (1987).
    Parameters
    __________
    N : number of directions, default is 100
    Returns
    ______
    array of nested dec,inc pairs
    """
    # Drawing z uniformly on [-1, 1] and taking arcsin gives inclinations
    # that are equal-area (uniform on the sphere); declinations are drawn
    # uniformly on [0, 360).
    z = random.uniform(-1., 1., size=N)
    t = random.uniform(0., 360., size=N)  # declinations
    i = np.arcsin(z) * 180. / np.pi  # inclinations, in degrees
    return np.array([t, i]).transpose()
constant[
Generates N uniformly distributed directions
using the way described in Fisher et al. (1987).
Parameters
__________
N : number of directions, default is 100
Returns
______
array of nested dec,inc pairs
]
variable[z] assign[=] call[name[random].uniform, parameter[<ast.UnaryOp object at 0x7da1b04e11b0>, constant[1.0]]]
variable[t] assign[=] call[name[random].uniform, parameter[constant[0.0], constant[360.0]]]
variable[i] assign[=] binary_operation[binary_operation[call[name[np].arcsin, parameter[name[z]]] * constant[180.0]] / name[np].pi]
return[call[call[name[np].array, parameter[list[[<ast.Name object at 0x7da2044c0760>, <ast.Name object at 0x7da2044c1fc0>]]]].transpose, parameter[]]]
constant[
subroutine to retrieve N uniformly distributed directions
] | keyword[def] identifier[get_unf] ( identifier[N] = literal[int] ):
literal[string]
identifier[z] = identifier[random] . identifier[uniform] (- literal[int] , literal[int] , identifier[size] = identifier[N] )
identifier[t] = identifier[random] . identifier[uniform] ( literal[int] , literal[int] , identifier[size] = identifier[N] )
identifier[i] = identifier[np] . identifier[arcsin] ( identifier[z] )* literal[int] / identifier[np] . identifier[pi]
keyword[return] identifier[np] . identifier[array] ([ identifier[t] , identifier[i] ]). identifier[transpose] ()
literal[string] | def get_unf(N=100):
"""
Generates N uniformly distributed directions
using the way described in Fisher et al. (1987).
Parameters
__________
N : number of directions, default is 100
Returns
______
array of nested dec,inc pairs
"""
#
# get uniform directions [dec,inc]
z = random.uniform(-1.0, 1.0, size=N)
t = random.uniform(0.0, 360.0, size=N) # decs
i = np.arcsin(z) * 180.0 / np.pi # incs
return np.array([t, i]).transpose()
# def get_unf(N): #Jeff's way
'\n subroutine to retrieve N uniformly distributed directions\n ' |
def name_from_base(base, max_length=63, short=False):
    """Append a timestamp to the provided string.
    This function assures that the total length of the resulting string is not
    longer than the specified max length, trimming the input parameter if necessary.
    Args:
        base (str): String used as prefix to generate the unique name.
        max_length (int): Maximum length for the resulting string.
        short (bool): Whether or not to use a truncated timestamp.
    Returns:
        str: Input parameter with appended timestamp.
    """
    if short:
        timestamp = sagemaker_short_timestamp()
    else:
        timestamp = sagemaker_timestamp()
    # Reserve room for the timestamp and the joining '-' so the result
    # never exceeds max_length.
    budget = max_length - len(timestamp) - 1
    return '{}-{}'.format(base[:budget], timestamp)
constant[Append a timestamp to the provided string.
This function assures that the total length of the resulting string is not
longer than the specified max length, trimming the input parameter if necessary.
Args:
base (str): String used as prefix to generate the unique name.
max_length (int): Maximum length for the resulting string.
short (bool): Whether or not to use a truncated timestamp.
Returns:
str: Input parameter with appended timestamp.
]
variable[timestamp] assign[=] <ast.IfExp object at 0x7da1b1f0bfa0>
variable[trimmed_base] assign[=] call[name[base]][<ast.Slice object at 0x7da1b1f0aa10>]
return[call[constant[{}-{}].format, parameter[name[trimmed_base], name[timestamp]]]] | keyword[def] identifier[name_from_base] ( identifier[base] , identifier[max_length] = literal[int] , identifier[short] = keyword[False] ):
literal[string]
identifier[timestamp] = identifier[sagemaker_short_timestamp] () keyword[if] identifier[short] keyword[else] identifier[sagemaker_timestamp] ()
identifier[trimmed_base] = identifier[base] [: identifier[max_length] - identifier[len] ( identifier[timestamp] )- literal[int] ]
keyword[return] literal[string] . identifier[format] ( identifier[trimmed_base] , identifier[timestamp] ) | def name_from_base(base, max_length=63, short=False):
"""Append a timestamp to the provided string.
This function assures that the total length of the resulting string is not
longer than the specified max length, trimming the input parameter if necessary.
Args:
base (str): String used as prefix to generate the unique name.
max_length (int): Maximum length for the resulting string.
short (bool): Whether or not to use a truncated timestamp.
Returns:
str: Input parameter with appended timestamp.
"""
timestamp = sagemaker_short_timestamp() if short else sagemaker_timestamp()
trimmed_base = base[:max_length - len(timestamp) - 1]
return '{}-{}'.format(trimmed_base, timestamp) |
def cut_distant_injections(workflow, inj_file, out_dir, tags=None):
    """Set up a job for removing injections that are too distant to be seen."""
    tags = [] if tags is None else tags
    executable = Executable(workflow.cp, 'inj_cut', ifos=workflow.ifos,
                            out_dir=out_dir, tags=tags)
    node = executable.create_node()
    node.add_input_opt('--input', inj_file)
    node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
    # Register the job with the workflow before handing back its product.
    workflow += node
    return node.output_files[0]
constant[Set up a job for removing injections that are too distant to be seen]
if compare[name[tags] is constant[None]] begin[:]
variable[tags] assign[=] list[[]]
variable[node] assign[=] call[call[name[Executable], parameter[name[workflow].cp, constant[inj_cut]]].create_node, parameter[]]
call[name[node].add_input_opt, parameter[constant[--input], name[inj_file]]]
call[name[node].new_output_file_opt, parameter[name[workflow].analysis_time, constant[.xml], constant[--output-file]]]
<ast.AugAssign object at 0x7da2044c25f0>
return[call[name[node].output_files][constant[0]]] | keyword[def] identifier[cut_distant_injections] ( identifier[workflow] , identifier[inj_file] , identifier[out_dir] , identifier[tags] = keyword[None] ):
literal[string]
keyword[if] identifier[tags] keyword[is] keyword[None] :
identifier[tags] =[]
identifier[node] = identifier[Executable] ( identifier[workflow] . identifier[cp] , literal[string] , identifier[ifos] = identifier[workflow] . identifier[ifos] ,
identifier[out_dir] = identifier[out_dir] , identifier[tags] = identifier[tags] ). identifier[create_node] ()
identifier[node] . identifier[add_input_opt] ( literal[string] , identifier[inj_file] )
identifier[node] . identifier[new_output_file_opt] ( identifier[workflow] . identifier[analysis_time] , literal[string] , literal[string] )
identifier[workflow] += identifier[node]
keyword[return] identifier[node] . identifier[output_files] [ literal[int] ] | def cut_distant_injections(workflow, inj_file, out_dir, tags=None):
"""Set up a job for removing injections that are too distant to be seen"""
if tags is None:
tags = [] # depends on [control=['if'], data=['tags']]
node = Executable(workflow.cp, 'inj_cut', ifos=workflow.ifos, out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--input', inj_file)
node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
workflow += node
return node.output_files[0] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.