code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def env_bool(name: str,
truthy_values: Iterable[Any]=TRUE_VALUES,
required: bool=False,
default: Union[Type[empty], bool]=empty) -> bool:
"""Pulls an environment variable out of the environment returning it as a
boolean. The strings ``'True'`` and ``'true'`` are the default *truthy*
values. If not present in the environment and no default is specified,
``None`` is returned.
:param name: The name of the environment variable be pulled
:type name: str
:param truthy_values: An iterable of values that should be considered
truthy.
:type truthy_values: iterable
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
"""
value = get_env_value(name, required=required, default=default)
if value is empty:
return None
return value in TRUE_VALUES | def function[env_bool, parameter[name, truthy_values, required, default]]:
constant[Pulls an environment variable out of the environment returning it as a
boolean. The strings ``'True'`` and ``'true'`` are the default *truthy*
values. If not present in the environment and no default is specified,
``None`` is returned.
:param name: The name of the environment variable be pulled
:type name: str
:param truthy_values: An iterable of values that should be considered
truthy.
:type truthy_values: iterable
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
]
variable[value] assign[=] call[name[get_env_value], parameter[name[name]]]
if compare[name[value] is name[empty]] begin[:]
return[constant[None]]
return[compare[name[value] in name[TRUE_VALUES]]] | keyword[def] identifier[env_bool] ( identifier[name] : identifier[str] ,
identifier[truthy_values] : identifier[Iterable] [ identifier[Any] ]= identifier[TRUE_VALUES] ,
identifier[required] : identifier[bool] = keyword[False] ,
identifier[default] : identifier[Union] [ identifier[Type] [ identifier[empty] ], identifier[bool] ]= identifier[empty] )-> identifier[bool] :
literal[string]
identifier[value] = identifier[get_env_value] ( identifier[name] , identifier[required] = identifier[required] , identifier[default] = identifier[default] )
keyword[if] identifier[value] keyword[is] identifier[empty] :
keyword[return] keyword[None]
keyword[return] identifier[value] keyword[in] identifier[TRUE_VALUES] | def env_bool(name: str, truthy_values: Iterable[Any]=TRUE_VALUES, required: bool=False, default: Union[Type[empty], bool]=empty) -> bool:
"""Pulls an environment variable out of the environment returning it as a
boolean. The strings ``'True'`` and ``'true'`` are the default *truthy*
values. If not present in the environment and no default is specified,
``None`` is returned.
:param name: The name of the environment variable be pulled
:type name: str
:param truthy_values: An iterable of values that should be considered
truthy.
:type truthy_values: iterable
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
"""
value = get_env_value(name, required=required, default=default)
if value is empty:
return None # depends on [control=['if'], data=[]]
return value in TRUE_VALUES |
def _setOutputNames(self,rootname,suffix='_drz'):
""" Define the default output filenames for drizzle products,
these are based on the original rootname of the image
filename should be just 1 filename, so call this in a loop
for chip names contained inside a file.
"""
# Define FITS output filenames for intermediate products
# Build names based on final DRIZZLE output name
# where 'output' normally would have been created
# by 'process_input()'
#
outFinal = rootname+suffix+'.fits'
outSci = rootname+suffix+'_sci.fits'
outWeight = rootname+suffix+'_wht.fits'
outContext = rootname+suffix+'_ctx.fits'
outMedian = rootname+'_med.fits'
# Build names based on input name
origFilename = self._filename.replace('.fits','_OrIg.fits')
outSky = rootname + '_sky.fits'
outSingle = rootname+'_single_sci.fits'
outSWeight = rootname+'_single_wht.fits'
crCorImage = rootname+'_crclean.fits'
# Build outputNames dictionary
fnames={
'origFilename': origFilename,
'outFinal': outFinal,
'outMedian': outMedian,
'outSci': outSci,
'outWeight': outWeight,
'outContext': outContext,
'outSingle': outSingle,
'outSWeight': outSWeight,
'outSContext': None,
'outSky': outSky,
'crcorImage': crCorImage,
'ivmFile': None
}
return fnames | def function[_setOutputNames, parameter[self, rootname, suffix]]:
constant[ Define the default output filenames for drizzle products,
these are based on the original rootname of the image
filename should be just 1 filename, so call this in a loop
for chip names contained inside a file.
]
variable[outFinal] assign[=] binary_operation[binary_operation[name[rootname] + name[suffix]] + constant[.fits]]
variable[outSci] assign[=] binary_operation[binary_operation[name[rootname] + name[suffix]] + constant[_sci.fits]]
variable[outWeight] assign[=] binary_operation[binary_operation[name[rootname] + name[suffix]] + constant[_wht.fits]]
variable[outContext] assign[=] binary_operation[binary_operation[name[rootname] + name[suffix]] + constant[_ctx.fits]]
variable[outMedian] assign[=] binary_operation[name[rootname] + constant[_med.fits]]
variable[origFilename] assign[=] call[name[self]._filename.replace, parameter[constant[.fits], constant[_OrIg.fits]]]
variable[outSky] assign[=] binary_operation[name[rootname] + constant[_sky.fits]]
variable[outSingle] assign[=] binary_operation[name[rootname] + constant[_single_sci.fits]]
variable[outSWeight] assign[=] binary_operation[name[rootname] + constant[_single_wht.fits]]
variable[crCorImage] assign[=] binary_operation[name[rootname] + constant[_crclean.fits]]
variable[fnames] assign[=] dictionary[[<ast.Constant object at 0x7da1b1bb4fa0>, <ast.Constant object at 0x7da1b1bb4e20>, <ast.Constant object at 0x7da1b1bb4fd0>, <ast.Constant object at 0x7da1b1bb5e40>, <ast.Constant object at 0x7da1b1bb4640>, <ast.Constant object at 0x7da1b1bb4550>, <ast.Constant object at 0x7da1b1bb9150>, <ast.Constant object at 0x7da1b1bb9720>, <ast.Constant object at 0x7da1b1bb83d0>, <ast.Constant object at 0x7da1b1bb9fc0>, <ast.Constant object at 0x7da1b1bba170>, <ast.Constant object at 0x7da1b1bbf580>], [<ast.Name object at 0x7da1b1bbec50>, <ast.Name object at 0x7da1b1c21b40>, <ast.Name object at 0x7da1b1c23a00>, <ast.Name object at 0x7da1b1c21db0>, <ast.Name object at 0x7da1b1c22710>, <ast.Name object at 0x7da1b1c221a0>, <ast.Name object at 0x7da1b1c21870>, <ast.Name object at 0x7da1b1c21ea0>, <ast.Constant object at 0x7da1b1c217e0>, <ast.Name object at 0x7da1b1c226b0>, <ast.Name object at 0x7da1b1c23c40>, <ast.Constant object at 0x7da1b1c21630>]]
return[name[fnames]] | keyword[def] identifier[_setOutputNames] ( identifier[self] , identifier[rootname] , identifier[suffix] = literal[string] ):
literal[string]
identifier[outFinal] = identifier[rootname] + identifier[suffix] + literal[string]
identifier[outSci] = identifier[rootname] + identifier[suffix] + literal[string]
identifier[outWeight] = identifier[rootname] + identifier[suffix] + literal[string]
identifier[outContext] = identifier[rootname] + identifier[suffix] + literal[string]
identifier[outMedian] = identifier[rootname] + literal[string]
identifier[origFilename] = identifier[self] . identifier[_filename] . identifier[replace] ( literal[string] , literal[string] )
identifier[outSky] = identifier[rootname] + literal[string]
identifier[outSingle] = identifier[rootname] + literal[string]
identifier[outSWeight] = identifier[rootname] + literal[string]
identifier[crCorImage] = identifier[rootname] + literal[string]
identifier[fnames] ={
literal[string] : identifier[origFilename] ,
literal[string] : identifier[outFinal] ,
literal[string] : identifier[outMedian] ,
literal[string] : identifier[outSci] ,
literal[string] : identifier[outWeight] ,
literal[string] : identifier[outContext] ,
literal[string] : identifier[outSingle] ,
literal[string] : identifier[outSWeight] ,
literal[string] : keyword[None] ,
literal[string] : identifier[outSky] ,
literal[string] : identifier[crCorImage] ,
literal[string] : keyword[None]
}
keyword[return] identifier[fnames] | def _setOutputNames(self, rootname, suffix='_drz'):
""" Define the default output filenames for drizzle products,
these are based on the original rootname of the image
filename should be just 1 filename, so call this in a loop
for chip names contained inside a file.
"""
# Define FITS output filenames for intermediate products
# Build names based on final DRIZZLE output name
# where 'output' normally would have been created
# by 'process_input()'
#
outFinal = rootname + suffix + '.fits'
outSci = rootname + suffix + '_sci.fits'
outWeight = rootname + suffix + '_wht.fits'
outContext = rootname + suffix + '_ctx.fits'
outMedian = rootname + '_med.fits'
# Build names based on input name
origFilename = self._filename.replace('.fits', '_OrIg.fits')
outSky = rootname + '_sky.fits'
outSingle = rootname + '_single_sci.fits'
outSWeight = rootname + '_single_wht.fits'
crCorImage = rootname + '_crclean.fits'
# Build outputNames dictionary
fnames = {'origFilename': origFilename, 'outFinal': outFinal, 'outMedian': outMedian, 'outSci': outSci, 'outWeight': outWeight, 'outContext': outContext, 'outSingle': outSingle, 'outSWeight': outSWeight, 'outSContext': None, 'outSky': outSky, 'crcorImage': crCorImage, 'ivmFile': None}
return fnames |
def member_at_in(self, leaderboard_name, position, **options):
'''
Retrieve a member at the specified index from the leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param position [int] Position in named leaderboard.
@param options [Hash] Options to be used when retrieving the member from the named leaderboard.
@return a page of leaders from the named leaderboard.
'''
if position > 0 and position <= self.total_members_in(leaderboard_name):
page_size = options.get('page_size', self.page_size)
current_page = math.ceil(float(position) / float(page_size))
offset = (position - 1) % page_size
leaders = self.leaders_in(
leaderboard_name,
current_page,
**options)
if leaders:
return leaders[offset] | def function[member_at_in, parameter[self, leaderboard_name, position]]:
constant[
Retrieve a member at the specified index from the leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param position [int] Position in named leaderboard.
@param options [Hash] Options to be used when retrieving the member from the named leaderboard.
@return a page of leaders from the named leaderboard.
]
if <ast.BoolOp object at 0x7da1b0592ce0> begin[:]
variable[page_size] assign[=] call[name[options].get, parameter[constant[page_size], name[self].page_size]]
variable[current_page] assign[=] call[name[math].ceil, parameter[binary_operation[call[name[float], parameter[name[position]]] / call[name[float], parameter[name[page_size]]]]]]
variable[offset] assign[=] binary_operation[binary_operation[name[position] - constant[1]] <ast.Mod object at 0x7da2590d6920> name[page_size]]
variable[leaders] assign[=] call[name[self].leaders_in, parameter[name[leaderboard_name], name[current_page]]]
if name[leaders] begin[:]
return[call[name[leaders]][name[offset]]] | keyword[def] identifier[member_at_in] ( identifier[self] , identifier[leaderboard_name] , identifier[position] ,** identifier[options] ):
literal[string]
keyword[if] identifier[position] > literal[int] keyword[and] identifier[position] <= identifier[self] . identifier[total_members_in] ( identifier[leaderboard_name] ):
identifier[page_size] = identifier[options] . identifier[get] ( literal[string] , identifier[self] . identifier[page_size] )
identifier[current_page] = identifier[math] . identifier[ceil] ( identifier[float] ( identifier[position] )/ identifier[float] ( identifier[page_size] ))
identifier[offset] =( identifier[position] - literal[int] )% identifier[page_size]
identifier[leaders] = identifier[self] . identifier[leaders_in] (
identifier[leaderboard_name] ,
identifier[current_page] ,
** identifier[options] )
keyword[if] identifier[leaders] :
keyword[return] identifier[leaders] [ identifier[offset] ] | def member_at_in(self, leaderboard_name, position, **options):
"""
Retrieve a member at the specified index from the leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param position [int] Position in named leaderboard.
@param options [Hash] Options to be used when retrieving the member from the named leaderboard.
@return a page of leaders from the named leaderboard.
"""
if position > 0 and position <= self.total_members_in(leaderboard_name):
page_size = options.get('page_size', self.page_size)
current_page = math.ceil(float(position) / float(page_size))
offset = (position - 1) % page_size
leaders = self.leaders_in(leaderboard_name, current_page, **options)
if leaders:
return leaders[offset] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def _parse_line(line):
"""
Convert one line from the extended log to dict.
Args:
line (str): Line which will be converted.
Returns:
dict: dict with ``timestamp``, ``command``, ``username`` and ``path`` \
keys.
Note:
Typical line looks like this::
/home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777
Filename may contain ``,`` character, so I am ``rsplitting`` the line
from the end to the beginning.
"""
line, timestamp = line.rsplit(",", 1)
line, command = line.rsplit(",", 1)
path, username = line.rsplit(",", 1)
return {
"timestamp": timestamp.strip(),
"command": command.strip(),
"username": username.strip(),
"path": path,
} | def function[_parse_line, parameter[line]]:
constant[
Convert one line from the extended log to dict.
Args:
line (str): Line which will be converted.
Returns:
dict: dict with ``timestamp``, ``command``, ``username`` and ``path`` keys.
Note:
Typical line looks like this::
/home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777
Filename may contain ``,`` character, so I am ``rsplitting`` the line
from the end to the beginning.
]
<ast.Tuple object at 0x7da18dc9a230> assign[=] call[name[line].rsplit, parameter[constant[,], constant[1]]]
<ast.Tuple object at 0x7da18dc99600> assign[=] call[name[line].rsplit, parameter[constant[,], constant[1]]]
<ast.Tuple object at 0x7da18dc985b0> assign[=] call[name[line].rsplit, parameter[constant[,], constant[1]]]
return[dictionary[[<ast.Constant object at 0x7da18dc98220>, <ast.Constant object at 0x7da18dc9b220>, <ast.Constant object at 0x7da18dc9a860>, <ast.Constant object at 0x7da18dc9bd30>], [<ast.Call object at 0x7da18dc983a0>, <ast.Call object at 0x7da18dc9aef0>, <ast.Call object at 0x7da18dc9b2e0>, <ast.Name object at 0x7da18dc999f0>]]] | keyword[def] identifier[_parse_line] ( identifier[line] ):
literal[string]
identifier[line] , identifier[timestamp] = identifier[line] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[line] , identifier[command] = identifier[line] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[path] , identifier[username] = identifier[line] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[return] {
literal[string] : identifier[timestamp] . identifier[strip] (),
literal[string] : identifier[command] . identifier[strip] (),
literal[string] : identifier[username] . identifier[strip] (),
literal[string] : identifier[path] ,
} | def _parse_line(line):
"""
Convert one line from the extended log to dict.
Args:
line (str): Line which will be converted.
Returns:
dict: dict with ``timestamp``, ``command``, ``username`` and ``path`` keys.
Note:
Typical line looks like this::
/home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777
Filename may contain ``,`` character, so I am ``rsplitting`` the line
from the end to the beginning.
"""
(line, timestamp) = line.rsplit(',', 1)
(line, command) = line.rsplit(',', 1)
(path, username) = line.rsplit(',', 1)
return {'timestamp': timestamp.strip(), 'command': command.strip(), 'username': username.strip(), 'path': path} |
def train(self, *args, **kwargs):
'''Train the network until the trainer converges.
All arguments are passed to :func:`itertrain`.
Returns
-------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
monitors = None
for monitors in self.itertrain(*args, **kwargs):
pass
return monitors | def function[train, parameter[self]]:
constant[Train the network until the trainer converges.
All arguments are passed to :func:`itertrain`.
Returns
-------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
]
variable[monitors] assign[=] constant[None]
for taget[name[monitors]] in starred[call[name[self].itertrain, parameter[<ast.Starred object at 0x7da1b02d14b0>]]] begin[:]
pass
return[name[monitors]] | keyword[def] identifier[train] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[monitors] = keyword[None]
keyword[for] identifier[monitors] keyword[in] identifier[self] . identifier[itertrain] (* identifier[args] ,** identifier[kwargs] ):
keyword[pass]
keyword[return] identifier[monitors] | def train(self, *args, **kwargs):
"""Train the network until the trainer converges.
All arguments are passed to :func:`itertrain`.
Returns
-------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
"""
monitors = None
for monitors in self.itertrain(*args, **kwargs):
pass # depends on [control=['for'], data=[]]
return monitors |
def recv(self, picture, *args):
"""
Receive a 'picture' message to the socket (or actor). See zsock_send for
the format and meaning of the picture. Returns the picture elements into
a series of pointers as provided by the caller:
i = int * (stores signed integer)
4 = uint32_t * (stores 32-bit unsigned integer)
8 = uint64_t * (stores 64-bit unsigned integer)
s = char ** (allocates new string)
b = byte **, size_t * (2 arguments) (allocates memory)
c = zchunk_t ** (creates zchunk)
f = zframe_t ** (creates zframe)
U = zuuid_t * (creates a zuuid with the data)
h = zhashx_t ** (creates zhashx)
p = void ** (stores pointer)
m = zmsg_t ** (creates a zmsg with the remaining frames)
z = null, asserts empty frame (0 arguments)
u = uint * (stores unsigned integer, deprecated)
Note that zsock_recv creates the returned objects, and the caller must
destroy them when finished with them. The supplied pointers do not need
to be initialized. Returns 0 if successful, or -1 if it failed to recv
a message, in which case the pointers are not modified. When message
frames are truncated (a short message), sets return values to zero/null.
If an argument pointer is NULL, does not store any value (skips it).
An 'n' picture matches an empty frame; if the message does not match,
the method will return -1.
"""
return lib.zsock_recv(self._as_parameter_, picture, *args) | def function[recv, parameter[self, picture]]:
constant[
Receive a 'picture' message to the socket (or actor). See zsock_send for
the format and meaning of the picture. Returns the picture elements into
a series of pointers as provided by the caller:
i = int * (stores signed integer)
4 = uint32_t * (stores 32-bit unsigned integer)
8 = uint64_t * (stores 64-bit unsigned integer)
s = char ** (allocates new string)
b = byte **, size_t * (2 arguments) (allocates memory)
c = zchunk_t ** (creates zchunk)
f = zframe_t ** (creates zframe)
U = zuuid_t * (creates a zuuid with the data)
h = zhashx_t ** (creates zhashx)
p = void ** (stores pointer)
m = zmsg_t ** (creates a zmsg with the remaining frames)
z = null, asserts empty frame (0 arguments)
u = uint * (stores unsigned integer, deprecated)
Note that zsock_recv creates the returned objects, and the caller must
destroy them when finished with them. The supplied pointers do not need
to be initialized. Returns 0 if successful, or -1 if it failed to recv
a message, in which case the pointers are not modified. When message
frames are truncated (a short message), sets return values to zero/null.
If an argument pointer is NULL, does not store any value (skips it).
An 'n' picture matches an empty frame; if the message does not match,
the method will return -1.
]
return[call[name[lib].zsock_recv, parameter[name[self]._as_parameter_, name[picture], <ast.Starred object at 0x7da1b09d1ea0>]]] | keyword[def] identifier[recv] ( identifier[self] , identifier[picture] ,* identifier[args] ):
literal[string]
keyword[return] identifier[lib] . identifier[zsock_recv] ( identifier[self] . identifier[_as_parameter_] , identifier[picture] ,* identifier[args] ) | def recv(self, picture, *args):
"""
Receive a 'picture' message to the socket (or actor). See zsock_send for
the format and meaning of the picture. Returns the picture elements into
a series of pointers as provided by the caller:
i = int * (stores signed integer)
4 = uint32_t * (stores 32-bit unsigned integer)
8 = uint64_t * (stores 64-bit unsigned integer)
s = char ** (allocates new string)
b = byte **, size_t * (2 arguments) (allocates memory)
c = zchunk_t ** (creates zchunk)
f = zframe_t ** (creates zframe)
U = zuuid_t * (creates a zuuid with the data)
h = zhashx_t ** (creates zhashx)
p = void ** (stores pointer)
m = zmsg_t ** (creates a zmsg with the remaining frames)
z = null, asserts empty frame (0 arguments)
u = uint * (stores unsigned integer, deprecated)
Note that zsock_recv creates the returned objects, and the caller must
destroy them when finished with them. The supplied pointers do not need
to be initialized. Returns 0 if successful, or -1 if it failed to recv
a message, in which case the pointers are not modified. When message
frames are truncated (a short message), sets return values to zero/null.
If an argument pointer is NULL, does not store any value (skips it).
An 'n' picture matches an empty frame; if the message does not match,
the method will return -1.
"""
return lib.zsock_recv(self._as_parameter_, picture, *args) |
def max_num_reads(self, **params):
"""Returns the maximum number of reads for the given solver parameters.
Args:
**params:
Parameters for the sampling method. Relevant to num_reads:
- annealing_time
- readout_thermalization
- num_reads
- programming_thermalization
Returns:
int: The maximum number of reads.
"""
# dev note: in the future it would be good to have a way of doing this
# server-side, as we are duplicating logic here.
properties = self.properties
if self.software or not params:
# software solvers don't use any of the above parameters
return properties['num_reads_range'][1]
# qpu
_, duration = properties['problem_run_duration_range']
annealing_time = params.get('annealing_time',
properties['default_annealing_time'])
readout_thermalization = params.get('readout_thermalization',
properties['default_readout_thermalization'])
programming_thermalization = params.get('programming_thermalization',
properties['default_programming_thermalization'])
return min(properties['num_reads_range'][1],
int((duration - programming_thermalization)
/ (annealing_time + readout_thermalization))) | def function[max_num_reads, parameter[self]]:
constant[Returns the maximum number of reads for the given solver parameters.
Args:
**params:
Parameters for the sampling method. Relevant to num_reads:
- annealing_time
- readout_thermalization
- num_reads
- programming_thermalization
Returns:
int: The maximum number of reads.
]
variable[properties] assign[=] name[self].properties
if <ast.BoolOp object at 0x7da1b0feaec0> begin[:]
return[call[call[name[properties]][constant[num_reads_range]]][constant[1]]]
<ast.Tuple object at 0x7da1b0fe9ae0> assign[=] call[name[properties]][constant[problem_run_duration_range]]
variable[annealing_time] assign[=] call[name[params].get, parameter[constant[annealing_time], call[name[properties]][constant[default_annealing_time]]]]
variable[readout_thermalization] assign[=] call[name[params].get, parameter[constant[readout_thermalization], call[name[properties]][constant[default_readout_thermalization]]]]
variable[programming_thermalization] assign[=] call[name[params].get, parameter[constant[programming_thermalization], call[name[properties]][constant[default_programming_thermalization]]]]
return[call[name[min], parameter[call[call[name[properties]][constant[num_reads_range]]][constant[1]], call[name[int], parameter[binary_operation[binary_operation[name[duration] - name[programming_thermalization]] / binary_operation[name[annealing_time] + name[readout_thermalization]]]]]]]] | keyword[def] identifier[max_num_reads] ( identifier[self] ,** identifier[params] ):
literal[string]
identifier[properties] = identifier[self] . identifier[properties]
keyword[if] identifier[self] . identifier[software] keyword[or] keyword[not] identifier[params] :
keyword[return] identifier[properties] [ literal[string] ][ literal[int] ]
identifier[_] , identifier[duration] = identifier[properties] [ literal[string] ]
identifier[annealing_time] = identifier[params] . identifier[get] ( literal[string] ,
identifier[properties] [ literal[string] ])
identifier[readout_thermalization] = identifier[params] . identifier[get] ( literal[string] ,
identifier[properties] [ literal[string] ])
identifier[programming_thermalization] = identifier[params] . identifier[get] ( literal[string] ,
identifier[properties] [ literal[string] ])
keyword[return] identifier[min] ( identifier[properties] [ literal[string] ][ literal[int] ],
identifier[int] (( identifier[duration] - identifier[programming_thermalization] )
/( identifier[annealing_time] + identifier[readout_thermalization] ))) | def max_num_reads(self, **params):
"""Returns the maximum number of reads for the given solver parameters.
Args:
**params:
Parameters for the sampling method. Relevant to num_reads:
- annealing_time
- readout_thermalization
- num_reads
- programming_thermalization
Returns:
int: The maximum number of reads.
"""
# dev note: in the future it would be good to have a way of doing this
# server-side, as we are duplicating logic here.
properties = self.properties
if self.software or not params:
# software solvers don't use any of the above parameters
return properties['num_reads_range'][1] # depends on [control=['if'], data=[]]
# qpu
(_, duration) = properties['problem_run_duration_range']
annealing_time = params.get('annealing_time', properties['default_annealing_time'])
readout_thermalization = params.get('readout_thermalization', properties['default_readout_thermalization'])
programming_thermalization = params.get('programming_thermalization', properties['default_programming_thermalization'])
return min(properties['num_reads_range'][1], int((duration - programming_thermalization) / (annealing_time + readout_thermalization))) |
def show_messages(self):
"""Show all messages."""
if isinstance(self.static_message, MessageElement):
# Handle sent Message instance
string = html_header()
if self.static_message is not None:
string += self.static_message.to_html()
# Keep track of the last ID we had so we can scroll to it
self.last_id = 0
for message in self.dynamic_messages:
if message.element_id is None:
self.last_id += 1
message.element_id = str(self.last_id)
html = message.to_html(in_div_flag=True)
if html is not None:
string += html
string += html_footer()
elif (isinstance(self.static_message, str)):
# Handle sent text directly
string = self.static_message
elif self.static_message is not None:
string = str(self.static_message)
elif not self.static_message:
# handle dynamic message
# Handle sent Message instance
string = html_header()
# Keep track of the last ID we had so we can scroll to it
self.last_id = 0
for message in self.dynamic_messages:
if message.element_id is None:
self.last_id += 1
message.element_id = str(self.last_id)
html = message.to_html(in_div_flag=True)
if html is not None:
string += html
string += html_footer()
# Set HTML
self.load_html(HTML_STR_MODE, string) | def function[show_messages, parameter[self]]:
constant[Show all messages.]
if call[name[isinstance], parameter[name[self].static_message, name[MessageElement]]] begin[:]
variable[string] assign[=] call[name[html_header], parameter[]]
if compare[name[self].static_message is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b2346500>
name[self].last_id assign[=] constant[0]
for taget[name[message]] in starred[name[self].dynamic_messages] begin[:]
if compare[name[message].element_id is constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b2345e70>
name[message].element_id assign[=] call[name[str], parameter[name[self].last_id]]
variable[html] assign[=] call[name[message].to_html, parameter[]]
if compare[name[html] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b23460b0>
<ast.AugAssign object at 0x7da1b2347280>
call[name[self].load_html, parameter[name[HTML_STR_MODE], name[string]]] | keyword[def] identifier[show_messages] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[static_message] , identifier[MessageElement] ):
identifier[string] = identifier[html_header] ()
keyword[if] identifier[self] . identifier[static_message] keyword[is] keyword[not] keyword[None] :
identifier[string] += identifier[self] . identifier[static_message] . identifier[to_html] ()
identifier[self] . identifier[last_id] = literal[int]
keyword[for] identifier[message] keyword[in] identifier[self] . identifier[dynamic_messages] :
keyword[if] identifier[message] . identifier[element_id] keyword[is] keyword[None] :
identifier[self] . identifier[last_id] += literal[int]
identifier[message] . identifier[element_id] = identifier[str] ( identifier[self] . identifier[last_id] )
identifier[html] = identifier[message] . identifier[to_html] ( identifier[in_div_flag] = keyword[True] )
keyword[if] identifier[html] keyword[is] keyword[not] keyword[None] :
identifier[string] += identifier[html]
identifier[string] += identifier[html_footer] ()
keyword[elif] ( identifier[isinstance] ( identifier[self] . identifier[static_message] , identifier[str] )):
identifier[string] = identifier[self] . identifier[static_message]
keyword[elif] identifier[self] . identifier[static_message] keyword[is] keyword[not] keyword[None] :
identifier[string] = identifier[str] ( identifier[self] . identifier[static_message] )
keyword[elif] keyword[not] identifier[self] . identifier[static_message] :
identifier[string] = identifier[html_header] ()
identifier[self] . identifier[last_id] = literal[int]
keyword[for] identifier[message] keyword[in] identifier[self] . identifier[dynamic_messages] :
keyword[if] identifier[message] . identifier[element_id] keyword[is] keyword[None] :
identifier[self] . identifier[last_id] += literal[int]
identifier[message] . identifier[element_id] = identifier[str] ( identifier[self] . identifier[last_id] )
identifier[html] = identifier[message] . identifier[to_html] ( identifier[in_div_flag] = keyword[True] )
keyword[if] identifier[html] keyword[is] keyword[not] keyword[None] :
identifier[string] += identifier[html]
identifier[string] += identifier[html_footer] ()
identifier[self] . identifier[load_html] ( identifier[HTML_STR_MODE] , identifier[string] ) | def show_messages(self):
"""Show all messages."""
if isinstance(self.static_message, MessageElement):
# Handle sent Message instance
string = html_header()
if self.static_message is not None:
string += self.static_message.to_html() # depends on [control=['if'], data=[]]
# Keep track of the last ID we had so we can scroll to it
self.last_id = 0
for message in self.dynamic_messages:
if message.element_id is None:
self.last_id += 1
message.element_id = str(self.last_id) # depends on [control=['if'], data=[]]
html = message.to_html(in_div_flag=True)
if html is not None:
string += html # depends on [control=['if'], data=['html']] # depends on [control=['for'], data=['message']]
string += html_footer() # depends on [control=['if'], data=[]]
elif isinstance(self.static_message, str):
# Handle sent text directly
string = self.static_message # depends on [control=['if'], data=[]]
elif self.static_message is not None:
string = str(self.static_message) # depends on [control=['if'], data=[]]
elif not self.static_message:
# handle dynamic message
# Handle sent Message instance
string = html_header()
# Keep track of the last ID we had so we can scroll to it
self.last_id = 0
for message in self.dynamic_messages:
if message.element_id is None:
self.last_id += 1
message.element_id = str(self.last_id) # depends on [control=['if'], data=[]]
html = message.to_html(in_div_flag=True)
if html is not None:
string += html # depends on [control=['if'], data=['html']] # depends on [control=['for'], data=['message']]
string += html_footer() # depends on [control=['if'], data=[]]
# Set HTML
self.load_html(HTML_STR_MODE, string) |
def get_coding(text):
"""
Function to get the coding of a text.
@param text text to inspect (string)
@return coding string
"""
for line in text.splitlines()[:2]:
result = CODING_RE.search(to_text_string(line))
if result:
return result.group(1)
return None | def function[get_coding, parameter[text]]:
constant[
Function to get the coding of a text.
@param text text to inspect (string)
@return coding string
]
for taget[name[line]] in starred[call[call[name[text].splitlines, parameter[]]][<ast.Slice object at 0x7da1b2795300>]] begin[:]
variable[result] assign[=] call[name[CODING_RE].search, parameter[call[name[to_text_string], parameter[name[line]]]]]
if name[result] begin[:]
return[call[name[result].group, parameter[constant[1]]]]
return[constant[None]] | keyword[def] identifier[get_coding] ( identifier[text] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[text] . identifier[splitlines] ()[: literal[int] ]:
identifier[result] = identifier[CODING_RE] . identifier[search] ( identifier[to_text_string] ( identifier[line] ))
keyword[if] identifier[result] :
keyword[return] identifier[result] . identifier[group] ( literal[int] )
keyword[return] keyword[None] | def get_coding(text):
"""
Function to get the coding of a text.
@param text text to inspect (string)
@return coding string
"""
for line in text.splitlines()[:2]:
result = CODING_RE.search(to_text_string(line))
if result:
return result.group(1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return None |
def _log(self,
level,
fmt,
args=None,
extra=None,
exc_info=None,
inc_stackinfo=False,
inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed
log_record = self._make_record(
level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr)
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr)
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf)
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level) | def function[_log, parameter[self, level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc]]:
constant[ Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. ]
if <ast.UnaryOp object at 0x7da1b1a1d720> begin[:]
return[None]
variable[log_record] assign[=] call[name[self]._make_record, parameter[name[level], name[fmt], name[args], name[extra], name[exc_info], name[inc_stackinfo], name[inc_multiproc]]]
variable[logstr] assign[=] call[call[name[log_record]][constant[defaultfmt]].format, parameter[]]
if name[self].keep_history begin[:]
call[name[self].history.append, parameter[name[logstr]]]
variable[log_funcs] assign[=] call[name[self].config][name[level]]
variable[to_remove] assign[=] list[[]]
for taget[name[lf]] in starred[name[log_funcs]] begin[:]
<ast.Try object at 0x7da18ede77f0>
for taget[name[lf]] in starred[name[to_remove]] begin[:]
call[name[self].remove, parameter[name[level], name[lf]]]
call[name[self].info, parameter[constant[Logging function {} removed from level {}], name[lf], name[level]]] | keyword[def] identifier[_log] ( identifier[self] ,
identifier[level] ,
identifier[fmt] ,
identifier[args] = keyword[None] ,
identifier[extra] = keyword[None] ,
identifier[exc_info] = keyword[None] ,
identifier[inc_stackinfo] = keyword[False] ,
identifier[inc_multiproc] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[enabled] :
keyword[return]
identifier[log_record] = identifier[self] . identifier[_make_record] (
identifier[level] , identifier[fmt] , identifier[args] , identifier[extra] , identifier[exc_info] , identifier[inc_stackinfo] , identifier[inc_multiproc] )
identifier[logstr] = identifier[log_record] [ literal[string] ]. identifier[format] (** identifier[log_record] )
keyword[if] identifier[self] . identifier[keep_history] :
identifier[self] . identifier[history] . identifier[append] ( identifier[logstr] )
identifier[log_funcs] = identifier[self] . identifier[config] [ identifier[level] ]
identifier[to_remove] =[]
keyword[for] identifier[lf] keyword[in] identifier[log_funcs] :
keyword[try] :
identifier[lf] . identifier[send] ( identifier[logstr] )
keyword[except] identifier[StopIteration] :
identifier[to_remove] . identifier[append] ( identifier[lf] )
keyword[for] identifier[lf] keyword[in] identifier[to_remove] :
identifier[self] . identifier[remove] ( identifier[level] , identifier[lf] )
identifier[self] . identifier[info] ( literal[string] , identifier[lf] , identifier[level] ) | def _log(self, level, fmt, args=None, extra=None, exc_info=None, inc_stackinfo=False, inc_multiproc=False):
""" Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance's history. """
if not self.enabled:
return # Fail silently so that logging can easily be removed # depends on [control=['if'], data=[]]
log_record = self._make_record(level, fmt, args, extra, exc_info, inc_stackinfo, inc_multiproc)
logstr = log_record['defaultfmt'].format(**log_record) #whoah.
if self.keep_history:
self.history.append(logstr) # depends on [control=['if'], data=[]]
log_funcs = self.config[level]
to_remove = []
for lf in log_funcs:
try:
lf.send(logstr) # depends on [control=['try'], data=[]]
except StopIteration:
# in the case that the log function is already closed, add it
# to the list of functions to be deleted.
to_remove.append(lf) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['lf']]
for lf in to_remove:
self.remove(level, lf)
self.info('Logging function {} removed from level {}', lf, level) # depends on [control=['for'], data=['lf']] |
def reduce_weighted_logsumexp(logx,
w=None,
axis=None,
keep_dims=False,
return_sign=False,
name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.log(w))` is more
efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(w * exp(input))). It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with tf.name_scope(name or "reduce_weighted_logsumexp"):
logx = tf.convert_to_tensor(value=logx, name="logx")
if w is None:
lswe = tf.reduce_logsumexp(
input_tensor=logx, axis=axis, keepdims=keep_dims)
if return_sign:
sgn = tf.ones_like(lswe)
return lswe, sgn
return lswe
w = tf.convert_to_tensor(value=w, dtype=logx.dtype, name="w")
log_absw_x = logx + tf.math.log(tf.abs(w))
max_log_absw_x = tf.reduce_max(
input_tensor=log_absw_x, axis=axis, keepdims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = tf.where(
tf.math.is_inf(max_log_absw_x), tf.zeros_like(max_log_absw_x),
max_log_absw_x)
wx_over_max_absw_x = (tf.sign(w) * tf.exp(log_absw_x - max_log_absw_x))
sum_wx_over_max_absw_x = tf.reduce_sum(
input_tensor=wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
if not keep_dims:
max_log_absw_x = tf.squeeze(max_log_absw_x, axis)
sgn = tf.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + tf.math.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return lswe, sgn
return lswe | def function[reduce_weighted_logsumexp, parameter[logx, w, axis, keep_dims, return_sign, name]]:
constant[Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.log(w))` is more
efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(w * exp(input))). It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
]
with call[name[tf].name_scope, parameter[<ast.BoolOp object at 0x7da1b05bedd0>]] begin[:]
variable[logx] assign[=] call[name[tf].convert_to_tensor, parameter[]]
if compare[name[w] is constant[None]] begin[:]
variable[lswe] assign[=] call[name[tf].reduce_logsumexp, parameter[]]
if name[return_sign] begin[:]
variable[sgn] assign[=] call[name[tf].ones_like, parameter[name[lswe]]]
return[tuple[[<ast.Name object at 0x7da1b0528b80>, <ast.Name object at 0x7da1b052ae30>]]]
return[name[lswe]]
variable[w] assign[=] call[name[tf].convert_to_tensor, parameter[]]
variable[log_absw_x] assign[=] binary_operation[name[logx] + call[name[tf].math.log, parameter[call[name[tf].abs, parameter[name[w]]]]]]
variable[max_log_absw_x] assign[=] call[name[tf].reduce_max, parameter[]]
variable[max_log_absw_x] assign[=] call[name[tf].where, parameter[call[name[tf].math.is_inf, parameter[name[max_log_absw_x]]], call[name[tf].zeros_like, parameter[name[max_log_absw_x]]], name[max_log_absw_x]]]
variable[wx_over_max_absw_x] assign[=] binary_operation[call[name[tf].sign, parameter[name[w]]] * call[name[tf].exp, parameter[binary_operation[name[log_absw_x] - name[max_log_absw_x]]]]]
variable[sum_wx_over_max_absw_x] assign[=] call[name[tf].reduce_sum, parameter[]]
if <ast.UnaryOp object at 0x7da1b03fa590> begin[:]
variable[max_log_absw_x] assign[=] call[name[tf].squeeze, parameter[name[max_log_absw_x], name[axis]]]
variable[sgn] assign[=] call[name[tf].sign, parameter[name[sum_wx_over_max_absw_x]]]
variable[lswe] assign[=] binary_operation[name[max_log_absw_x] + call[name[tf].math.log, parameter[binary_operation[name[sgn] * name[sum_wx_over_max_absw_x]]]]]
if name[return_sign] begin[:]
return[tuple[[<ast.Name object at 0x7da1b03fb070>, <ast.Name object at 0x7da1b03f8670>]]]
return[name[lswe]] | keyword[def] identifier[reduce_weighted_logsumexp] ( identifier[logx] ,
identifier[w] = keyword[None] ,
identifier[axis] = keyword[None] ,
identifier[keep_dims] = keyword[False] ,
identifier[return_sign] = keyword[False] ,
identifier[name] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( identifier[name] keyword[or] literal[string] ):
identifier[logx] = identifier[tf] . identifier[convert_to_tensor] ( identifier[value] = identifier[logx] , identifier[name] = literal[string] )
keyword[if] identifier[w] keyword[is] keyword[None] :
identifier[lswe] = identifier[tf] . identifier[reduce_logsumexp] (
identifier[input_tensor] = identifier[logx] , identifier[axis] = identifier[axis] , identifier[keepdims] = identifier[keep_dims] )
keyword[if] identifier[return_sign] :
identifier[sgn] = identifier[tf] . identifier[ones_like] ( identifier[lswe] )
keyword[return] identifier[lswe] , identifier[sgn]
keyword[return] identifier[lswe]
identifier[w] = identifier[tf] . identifier[convert_to_tensor] ( identifier[value] = identifier[w] , identifier[dtype] = identifier[logx] . identifier[dtype] , identifier[name] = literal[string] )
identifier[log_absw_x] = identifier[logx] + identifier[tf] . identifier[math] . identifier[log] ( identifier[tf] . identifier[abs] ( identifier[w] ))
identifier[max_log_absw_x] = identifier[tf] . identifier[reduce_max] (
identifier[input_tensor] = identifier[log_absw_x] , identifier[axis] = identifier[axis] , identifier[keepdims] = keyword[True] )
identifier[max_log_absw_x] = identifier[tf] . identifier[where] (
identifier[tf] . identifier[math] . identifier[is_inf] ( identifier[max_log_absw_x] ), identifier[tf] . identifier[zeros_like] ( identifier[max_log_absw_x] ),
identifier[max_log_absw_x] )
identifier[wx_over_max_absw_x] =( identifier[tf] . identifier[sign] ( identifier[w] )* identifier[tf] . identifier[exp] ( identifier[log_absw_x] - identifier[max_log_absw_x] ))
identifier[sum_wx_over_max_absw_x] = identifier[tf] . identifier[reduce_sum] (
identifier[input_tensor] = identifier[wx_over_max_absw_x] , identifier[axis] = identifier[axis] , identifier[keepdims] = identifier[keep_dims] )
keyword[if] keyword[not] identifier[keep_dims] :
identifier[max_log_absw_x] = identifier[tf] . identifier[squeeze] ( identifier[max_log_absw_x] , identifier[axis] )
identifier[sgn] = identifier[tf] . identifier[sign] ( identifier[sum_wx_over_max_absw_x] )
identifier[lswe] = identifier[max_log_absw_x] + identifier[tf] . identifier[math] . identifier[log] ( identifier[sgn] * identifier[sum_wx_over_max_absw_x] )
keyword[if] identifier[return_sign] :
keyword[return] identifier[lswe] , identifier[sgn]
keyword[return] identifier[lswe] | def reduce_weighted_logsumexp(logx, w=None, axis=None, keep_dims=False, return_sign=False, name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.log(w))` is more
efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(w * exp(input))). It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with tf.name_scope(name or 'reduce_weighted_logsumexp'):
logx = tf.convert_to_tensor(value=logx, name='logx')
if w is None:
lswe = tf.reduce_logsumexp(input_tensor=logx, axis=axis, keepdims=keep_dims)
if return_sign:
sgn = tf.ones_like(lswe)
return (lswe, sgn) # depends on [control=['if'], data=[]]
return lswe # depends on [control=['if'], data=[]]
w = tf.convert_to_tensor(value=w, dtype=logx.dtype, name='w')
log_absw_x = logx + tf.math.log(tf.abs(w))
max_log_absw_x = tf.reduce_max(input_tensor=log_absw_x, axis=axis, keepdims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = tf.where(tf.math.is_inf(max_log_absw_x), tf.zeros_like(max_log_absw_x), max_log_absw_x)
wx_over_max_absw_x = tf.sign(w) * tf.exp(log_absw_x - max_log_absw_x)
sum_wx_over_max_absw_x = tf.reduce_sum(input_tensor=wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
if not keep_dims:
max_log_absw_x = tf.squeeze(max_log_absw_x, axis) # depends on [control=['if'], data=[]]
sgn = tf.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + tf.math.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return (lswe, sgn) # depends on [control=['if'], data=[]]
return lswe # depends on [control=['with'], data=[]] |
def read_msg(self):
"""Read a line of input. EOFError will be raised on EOF.
Note that we don't support prompting"""
# FIXME: do we have to create and check a buffer for
# lines?
if self.state == 'active':
if not self.input:
self.input = open(self.in_name, 'r')
pass
line = self.input.readline()
if not line:
self.state = 'disconnected'
raise EOFError
return line.encode("utf-8")
else:
raise IOError("readline called in state: %s." % self.state)
return | def function[read_msg, parameter[self]]:
constant[Read a line of input. EOFError will be raised on EOF.
Note that we don't support prompting]
if compare[name[self].state equal[==] constant[active]] begin[:]
if <ast.UnaryOp object at 0x7da1b03ba110> begin[:]
name[self].input assign[=] call[name[open], parameter[name[self].in_name, constant[r]]]
pass
variable[line] assign[=] call[name[self].input.readline, parameter[]]
if <ast.UnaryOp object at 0x7da1b03bae60> begin[:]
name[self].state assign[=] constant[disconnected]
<ast.Raise object at 0x7da1b03b9cf0>
return[call[name[line].encode, parameter[constant[utf-8]]]]
return[None] | keyword[def] identifier[read_msg] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[state] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[input] :
identifier[self] . identifier[input] = identifier[open] ( identifier[self] . identifier[in_name] , literal[string] )
keyword[pass]
identifier[line] = identifier[self] . identifier[input] . identifier[readline] ()
keyword[if] keyword[not] identifier[line] :
identifier[self] . identifier[state] = literal[string]
keyword[raise] identifier[EOFError]
keyword[return] identifier[line] . identifier[encode] ( literal[string] )
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] % identifier[self] . identifier[state] )
keyword[return] | def read_msg(self):
"""Read a line of input. EOFError will be raised on EOF.
Note that we don't support prompting"""
# FIXME: do we have to create and check a buffer for
# lines?
if self.state == 'active':
if not self.input:
self.input = open(self.in_name, 'r')
pass # depends on [control=['if'], data=[]]
line = self.input.readline()
if not line:
self.state = 'disconnected'
raise EOFError # depends on [control=['if'], data=[]]
return line.encode('utf-8') # depends on [control=['if'], data=[]]
else:
raise IOError('readline called in state: %s.' % self.state)
return |
def x_frame2D(X,plot_limits=None,resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1]==2, "x_frame2D is defined for two-dimensional inputs"
if plot_limits is None:
xmin, xmax = X.min(0), X.max(0)
xmin, xmax = xmin-0.075*(xmax-xmin), xmax+0.075*(xmax-xmin)
elif len(plot_limits) == 2:
xmin, xmax = plot_limits
try:
xmin = xmin[0], xmin[1]
except:
# only one limit given, copy over to other lim
xmin = [plot_limits[0], plot_limits[0]]
xmax = [plot_limits[1], plot_limits[1]]
elif len(plot_limits) == 4:
xmin, xmax = (plot_limits[0], plot_limits[2]), (plot_limits[1], plot_limits[3])
else:
raise ValueError("Bad limits for plotting")
resolution = resolution or 50
xx, yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
Xnew = np.c_[xx.flat, yy.flat]
return Xnew, xx, yy, xmin, xmax | def function[x_frame2D, parameter[X, plot_limits, resolution]]:
constant[
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
]
assert[compare[call[name[X].shape][constant[1]] equal[==] constant[2]]]
if compare[name[plot_limits] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b26ad570> assign[=] tuple[[<ast.Call object at 0x7da1b26ac790>, <ast.Call object at 0x7da1b26af0a0>]]
<ast.Tuple object at 0x7da1b26ad870> assign[=] tuple[[<ast.BinOp object at 0x7da1b26ac9a0>, <ast.BinOp object at 0x7da1b26ad600>]]
variable[resolution] assign[=] <ast.BoolOp object at 0x7da1b26adb10>
<ast.Tuple object at 0x7da1b26ac160> assign[=] call[name[np].mgrid][tuple[[<ast.Slice object at 0x7da1b26aeb30>, <ast.Slice object at 0x7da1b26af9d0>]]]
variable[Xnew] assign[=] call[name[np].c_][tuple[[<ast.Attribute object at 0x7da1b26ac430>, <ast.Attribute object at 0x7da1b26af4f0>]]]
return[tuple[[<ast.Name object at 0x7da1b26aee90>, <ast.Name object at 0x7da1b26aeb90>, <ast.Name object at 0x7da1b26ae9b0>, <ast.Name object at 0x7da1b26af670>, <ast.Name object at 0x7da1b26ad930>]]] | keyword[def] identifier[x_frame2D] ( identifier[X] , identifier[plot_limits] = keyword[None] , identifier[resolution] = keyword[None] ):
literal[string]
keyword[assert] identifier[X] . identifier[shape] [ literal[int] ]== literal[int] , literal[string]
keyword[if] identifier[plot_limits] keyword[is] keyword[None] :
identifier[xmin] , identifier[xmax] = identifier[X] . identifier[min] ( literal[int] ), identifier[X] . identifier[max] ( literal[int] )
identifier[xmin] , identifier[xmax] = identifier[xmin] - literal[int] *( identifier[xmax] - identifier[xmin] ), identifier[xmax] + literal[int] *( identifier[xmax] - identifier[xmin] )
keyword[elif] identifier[len] ( identifier[plot_limits] )== literal[int] :
identifier[xmin] , identifier[xmax] = identifier[plot_limits]
keyword[try] :
identifier[xmin] = identifier[xmin] [ literal[int] ], identifier[xmin] [ literal[int] ]
keyword[except] :
identifier[xmin] =[ identifier[plot_limits] [ literal[int] ], identifier[plot_limits] [ literal[int] ]]
identifier[xmax] =[ identifier[plot_limits] [ literal[int] ], identifier[plot_limits] [ literal[int] ]]
keyword[elif] identifier[len] ( identifier[plot_limits] )== literal[int] :
identifier[xmin] , identifier[xmax] =( identifier[plot_limits] [ literal[int] ], identifier[plot_limits] [ literal[int] ]),( identifier[plot_limits] [ literal[int] ], identifier[plot_limits] [ literal[int] ])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[resolution] = identifier[resolution] keyword[or] literal[int]
identifier[xx] , identifier[yy] = identifier[np] . identifier[mgrid] [ identifier[xmin] [ literal[int] ]: identifier[xmax] [ literal[int] ]: literal[int] * identifier[resolution] , identifier[xmin] [ literal[int] ]: identifier[xmax] [ literal[int] ]: literal[int] * identifier[resolution] ]
identifier[Xnew] = identifier[np] . identifier[c_] [ identifier[xx] . identifier[flat] , identifier[yy] . identifier[flat] ]
keyword[return] identifier[Xnew] , identifier[xx] , identifier[yy] , identifier[xmin] , identifier[xmax] | def x_frame2D(X, plot_limits=None, resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] == 2, 'x_frame2D is defined for two-dimensional inputs'
if plot_limits is None:
(xmin, xmax) = (X.min(0), X.max(0))
(xmin, xmax) = (xmin - 0.075 * (xmax - xmin), xmax + 0.075 * (xmax - xmin)) # depends on [control=['if'], data=[]]
elif len(plot_limits) == 2:
(xmin, xmax) = plot_limits
try:
xmin = (xmin[0], xmin[1]) # depends on [control=['try'], data=[]]
except:
# only one limit given, copy over to other lim
xmin = [plot_limits[0], plot_limits[0]]
xmax = [plot_limits[1], plot_limits[1]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif len(plot_limits) == 4:
(xmin, xmax) = ((plot_limits[0], plot_limits[2]), (plot_limits[1], plot_limits[3])) # depends on [control=['if'], data=[]]
else:
raise ValueError('Bad limits for plotting')
resolution = resolution or 50
(xx, yy) = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:xmax[1]:1j * resolution]
Xnew = np.c_[xx.flat, yy.flat]
return (Xnew, xx, yy, xmin, xmax) |
def visible(self, visible):
"""When visible changed, do setup or unwatch and call visible_callback"""
self._visible = visible
if visible and len(self.panel.objects) == 0:
self.setup()
self.panel.extend(self.children)
elif not visible and len(self.panel.objects) > 0:
self.unwatch()
self.panel.clear()
if self.visible_callback:
self.visible_callback(visible) | def function[visible, parameter[self, visible]]:
constant[When visible changed, do setup or unwatch and call visible_callback]
name[self]._visible assign[=] name[visible]
if <ast.BoolOp object at 0x7da1b17f69b0> begin[:]
call[name[self].setup, parameter[]]
call[name[self].panel.extend, parameter[name[self].children]]
if name[self].visible_callback begin[:]
call[name[self].visible_callback, parameter[name[visible]]] | keyword[def] identifier[visible] ( identifier[self] , identifier[visible] ):
literal[string]
identifier[self] . identifier[_visible] = identifier[visible]
keyword[if] identifier[visible] keyword[and] identifier[len] ( identifier[self] . identifier[panel] . identifier[objects] )== literal[int] :
identifier[self] . identifier[setup] ()
identifier[self] . identifier[panel] . identifier[extend] ( identifier[self] . identifier[children] )
keyword[elif] keyword[not] identifier[visible] keyword[and] identifier[len] ( identifier[self] . identifier[panel] . identifier[objects] )> literal[int] :
identifier[self] . identifier[unwatch] ()
identifier[self] . identifier[panel] . identifier[clear] ()
keyword[if] identifier[self] . identifier[visible_callback] :
identifier[self] . identifier[visible_callback] ( identifier[visible] ) | def visible(self, visible):
"""When visible changed, do setup or unwatch and call visible_callback"""
self._visible = visible
if visible and len(self.panel.objects) == 0:
self.setup()
self.panel.extend(self.children) # depends on [control=['if'], data=[]]
elif not visible and len(self.panel.objects) > 0:
self.unwatch()
self.panel.clear() # depends on [control=['if'], data=[]]
if self.visible_callback:
self.visible_callback(visible) # depends on [control=['if'], data=[]] |
def _collect_data(directory):
"""Traverses directory collecting input and target files.
Args:
directory: base path to extracted audio and transcripts.
Returns:
list of (media_base, media_filepath, label) tuples
"""
# Returns:
data_files = []
transcripts = [
filename for filename in os.listdir(directory)
if filename.endswith(".csv")
]
for transcript in transcripts:
transcript_path = os.path.join(directory, transcript)
with open(transcript_path, "r") as transcript_file:
transcript_reader = csv.reader(transcript_file)
# skip header
_ = next(transcript_reader)
for transcript_line in transcript_reader:
media_name, label = transcript_line[0:2]
filename = os.path.join(directory, media_name)
data_files.append((media_name, filename, label))
return data_files | def function[_collect_data, parameter[directory]]:
constant[Traverses directory collecting input and target files.
Args:
directory: base path to extracted audio and transcripts.
Returns:
list of (media_base, media_filepath, label) tuples
]
variable[data_files] assign[=] list[[]]
variable[transcripts] assign[=] <ast.ListComp object at 0x7da1b1ff0b20>
for taget[name[transcript]] in starred[name[transcripts]] begin[:]
variable[transcript_path] assign[=] call[name[os].path.join, parameter[name[directory], name[transcript]]]
with call[name[open], parameter[name[transcript_path], constant[r]]] begin[:]
variable[transcript_reader] assign[=] call[name[csv].reader, parameter[name[transcript_file]]]
variable[_] assign[=] call[name[next], parameter[name[transcript_reader]]]
for taget[name[transcript_line]] in starred[name[transcript_reader]] begin[:]
<ast.Tuple object at 0x7da1b1ff0820> assign[=] call[name[transcript_line]][<ast.Slice object at 0x7da1b1ff1c30>]
variable[filename] assign[=] call[name[os].path.join, parameter[name[directory], name[media_name]]]
call[name[data_files].append, parameter[tuple[[<ast.Name object at 0x7da204567040>, <ast.Name object at 0x7da204566530>, <ast.Name object at 0x7da204564640>]]]]
return[name[data_files]] | keyword[def] identifier[_collect_data] ( identifier[directory] ):
literal[string]
identifier[data_files] =[]
identifier[transcripts] =[
identifier[filename] keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[directory] )
keyword[if] identifier[filename] . identifier[endswith] ( literal[string] )
]
keyword[for] identifier[transcript] keyword[in] identifier[transcripts] :
identifier[transcript_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[transcript] )
keyword[with] identifier[open] ( identifier[transcript_path] , literal[string] ) keyword[as] identifier[transcript_file] :
identifier[transcript_reader] = identifier[csv] . identifier[reader] ( identifier[transcript_file] )
identifier[_] = identifier[next] ( identifier[transcript_reader] )
keyword[for] identifier[transcript_line] keyword[in] identifier[transcript_reader] :
identifier[media_name] , identifier[label] = identifier[transcript_line] [ literal[int] : literal[int] ]
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[media_name] )
identifier[data_files] . identifier[append] (( identifier[media_name] , identifier[filename] , identifier[label] ))
keyword[return] identifier[data_files] | def _collect_data(directory):
"""Traverses directory collecting input and target files.
Args:
directory: base path to extracted audio and transcripts.
Returns:
list of (media_base, media_filepath, label) tuples
"""
# Returns:
data_files = []
transcripts = [filename for filename in os.listdir(directory) if filename.endswith('.csv')]
for transcript in transcripts:
transcript_path = os.path.join(directory, transcript)
with open(transcript_path, 'r') as transcript_file:
transcript_reader = csv.reader(transcript_file)
# skip header
_ = next(transcript_reader)
for transcript_line in transcript_reader:
(media_name, label) = transcript_line[0:2]
filename = os.path.join(directory, media_name)
data_files.append((media_name, filename, label)) # depends on [control=['for'], data=['transcript_line']] # depends on [control=['with'], data=['transcript_file']] # depends on [control=['for'], data=['transcript']]
return data_files |
def _read_pklc(lcfile):
'''
This just reads a light curve pickle file.
Parameters
----------
lcfile : str
The file name of the pickle to open.
Returns
-------
dict
This returns an lcdict.
'''
if lcfile.endswith('.gz'):
try:
with gzip.open(lcfile,'rb') as infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with gzip.open(lcfile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
else:
try:
with open(lcfile,'rb') as infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with open(lcfile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
return lcdict | def function[_read_pklc, parameter[lcfile]]:
constant[
This just reads a light curve pickle file.
Parameters
----------
lcfile : str
The file name of the pickle to open.
Returns
-------
dict
This returns an lcdict.
]
if call[name[lcfile].endswith, parameter[constant[.gz]]] begin[:]
<ast.Try object at 0x7da1b00378b0>
return[name[lcdict]] | keyword[def] identifier[_read_pklc] ( identifier[lcfile] ):
literal[string]
keyword[if] identifier[lcfile] . identifier[endswith] ( literal[string] ):
keyword[try] :
keyword[with] identifier[gzip] . identifier[open] ( identifier[lcfile] , literal[string] ) keyword[as] identifier[infd] :
identifier[lcdict] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[except] identifier[UnicodeDecodeError] :
keyword[with] identifier[gzip] . identifier[open] ( identifier[lcfile] , literal[string] ) keyword[as] identifier[infd] :
identifier[lcdict] = identifier[pickle] . identifier[load] ( identifier[infd] , identifier[encoding] = literal[string] )
keyword[else] :
keyword[try] :
keyword[with] identifier[open] ( identifier[lcfile] , literal[string] ) keyword[as] identifier[infd] :
identifier[lcdict] = identifier[pickle] . identifier[load] ( identifier[infd] )
keyword[except] identifier[UnicodeDecodeError] :
keyword[with] identifier[open] ( identifier[lcfile] , literal[string] ) keyword[as] identifier[infd] :
identifier[lcdict] = identifier[pickle] . identifier[load] ( identifier[infd] , identifier[encoding] = literal[string] )
keyword[return] identifier[lcdict] | def _read_pklc(lcfile):
"""
This just reads a light curve pickle file.
Parameters
----------
lcfile : str
The file name of the pickle to open.
Returns
-------
dict
This returns an lcdict.
"""
if lcfile.endswith('.gz'):
try:
with gzip.open(lcfile, 'rb') as infd:
lcdict = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
with gzip.open(lcfile, 'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1') # depends on [control=['with'], data=['infd']] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
with open(lcfile, 'rb') as infd:
lcdict = pickle.load(infd) # depends on [control=['with'], data=['infd']] # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
with open(lcfile, 'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1') # depends on [control=['with'], data=['infd']] # depends on [control=['except'], data=[]]
return lcdict |
def from_sample(sample):
"""Upload results of processing from an analysis pipeline sample.
"""
upload_config = sample.get("upload")
if upload_config:
approach = _approaches[upload_config.get("method", "filesystem")]
for finfo in _get_files(sample):
approach.update_file(finfo, sample, upload_config)
return [[sample]] | def function[from_sample, parameter[sample]]:
constant[Upload results of processing from an analysis pipeline sample.
]
variable[upload_config] assign[=] call[name[sample].get, parameter[constant[upload]]]
if name[upload_config] begin[:]
variable[approach] assign[=] call[name[_approaches]][call[name[upload_config].get, parameter[constant[method], constant[filesystem]]]]
for taget[name[finfo]] in starred[call[name[_get_files], parameter[name[sample]]]] begin[:]
call[name[approach].update_file, parameter[name[finfo], name[sample], name[upload_config]]]
return[list[[<ast.List object at 0x7da1b23479d0>]]] | keyword[def] identifier[from_sample] ( identifier[sample] ):
literal[string]
identifier[upload_config] = identifier[sample] . identifier[get] ( literal[string] )
keyword[if] identifier[upload_config] :
identifier[approach] = identifier[_approaches] [ identifier[upload_config] . identifier[get] ( literal[string] , literal[string] )]
keyword[for] identifier[finfo] keyword[in] identifier[_get_files] ( identifier[sample] ):
identifier[approach] . identifier[update_file] ( identifier[finfo] , identifier[sample] , identifier[upload_config] )
keyword[return] [[ identifier[sample] ]] | def from_sample(sample):
"""Upload results of processing from an analysis pipeline sample.
"""
upload_config = sample.get('upload')
if upload_config:
approach = _approaches[upload_config.get('method', 'filesystem')]
for finfo in _get_files(sample):
approach.update_file(finfo, sample, upload_config) # depends on [control=['for'], data=['finfo']] # depends on [control=['if'], data=[]]
return [[sample]] |
def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream) | def function[print_table, parameter[self, stream, filter_function]]:
constant[
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
]
call[name[print], parameter[call[name[self].to_table, parameter[]]]] | keyword[def] identifier[print_table] ( identifier[self] , identifier[stream] = identifier[sys] . identifier[stdout] , identifier[filter_function] = keyword[None] ):
literal[string]
identifier[print] ( identifier[self] . identifier[to_table] ( identifier[filter_function] = identifier[filter_function] ), identifier[file] = identifier[stream] ) | def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream) |
def convert(self, lat, lon, source, dest, height=0, datetime=None,
precision=1e-10, ssheight=50*6371):
"""Converts between geodetic, modified apex, quasi-dipole and MLT.
Parameters
==========
lat : array_like
Latitude
lon : array_like
Longitude/MLT
source : {'geo', 'apex', 'qd', 'mlt'}
Input coordinate system
dest : {'geo', 'apex', 'qd', 'mlt'}
Output coordinate system
height : array_like, optional
Altitude in km
datetime : :class:`datetime.datetime`
Date and time for MLT conversions (required for MLT conversions)
precision : float, optional
Precision of output (degrees) when converting to geo. A negative
value of this argument produces a low-precision calculation of
geodetic lat/lon based only on their spherical harmonic
representation.
A positive value causes the underlying Fortran routine to iterate
until feeding the output geo lat/lon into geo2qd (APXG2Q) reproduces
the input QD lat/lon to within the specified precision (all
coordinates being converted to geo are converted to QD first and
passed through APXG2Q).
ssheight : float, optional
Altitude in km to use for converting the subsolar point from
geographic to magnetic coordinates. A high altitude is used
to ensure the subsolar point is mapped to high latitudes, which
prevents the South-Atlantic Anomaly (SAA) from influencing the MLT.
Returns
=======
lat : ndarray or float
Converted latitude (if converting to MLT, output latitude is apex)
lat : ndarray or float
Converted longitude/MLT
"""
if datetime is None and ('mlt' in [source, dest]):
raise ValueError('datetime must be given for MLT calculations')
lat = helpers.checklat(lat)
if source == dest:
return lat, lon
# from geo
elif source == 'geo' and dest == 'apex':
lat, lon = self.geo2apex(lat, lon, height)
elif source == 'geo' and dest == 'qd':
lat, lon = self.geo2qd(lat, lon, height)
elif source == 'geo' and dest == 'mlt':
lat, lon = self.geo2apex(lat, lon, height)
lon = self.mlon2mlt(lon, datetime, ssheight=ssheight)
# from apex
elif source == 'apex' and dest == 'geo':
lat, lon, _ = self.apex2geo(lat, lon, height, precision=precision)
elif source == 'apex' and dest == 'qd':
lat, lon = self.apex2qd(lat, lon, height=height)
elif source == 'apex' and dest == 'mlt':
lon = self.mlon2mlt(lon, datetime, ssheight=ssheight)
# from qd
elif source == 'qd' and dest == 'geo':
lat, lon, _ = self.qd2geo(lat, lon, height, precision=precision)
elif source == 'qd' and dest == 'apex':
lat, lon = self.qd2apex(lat, lon, height=height)
elif source == 'qd' and dest == 'mlt':
lat, lon = self.qd2apex(lat, lon, height=height)
lon = self.mlon2mlt(lon, datetime, ssheight=ssheight)
# from mlt (input latitude assumed apex)
elif source == 'mlt' and dest == 'geo':
lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
lat, lon, _ = self.apex2geo(lat, lon, height, precision=precision)
elif source == 'mlt' and dest == 'apex':
lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
elif source == 'mlt' and dest == 'qd':
lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
lat, lon = self.apex2qd(lat, lon, height=height)
# no other transformations are implemented
else:
estr = 'Unknown coordinate transformation: '
estr += '{} -> {}'.format(source, dest)
raise NotImplementedError(estr)
return lat, lon | def function[convert, parameter[self, lat, lon, source, dest, height, datetime, precision, ssheight]]:
constant[Converts between geodetic, modified apex, quasi-dipole and MLT.
Parameters
==========
lat : array_like
Latitude
lon : array_like
Longitude/MLT
source : {'geo', 'apex', 'qd', 'mlt'}
Input coordinate system
dest : {'geo', 'apex', 'qd', 'mlt'}
Output coordinate system
height : array_like, optional
Altitude in km
datetime : :class:`datetime.datetime`
Date and time for MLT conversions (required for MLT conversions)
precision : float, optional
Precision of output (degrees) when converting to geo. A negative
value of this argument produces a low-precision calculation of
geodetic lat/lon based only on their spherical harmonic
representation.
A positive value causes the underlying Fortran routine to iterate
until feeding the output geo lat/lon into geo2qd (APXG2Q) reproduces
the input QD lat/lon to within the specified precision (all
coordinates being converted to geo are converted to QD first and
passed through APXG2Q).
ssheight : float, optional
Altitude in km to use for converting the subsolar point from
geographic to magnetic coordinates. A high altitude is used
to ensure the subsolar point is mapped to high latitudes, which
prevents the South-Atlantic Anomaly (SAA) from influencing the MLT.
Returns
=======
lat : ndarray or float
Converted latitude (if converting to MLT, output latitude is apex)
lat : ndarray or float
Converted longitude/MLT
]
if <ast.BoolOp object at 0x7da1b0e62500> begin[:]
<ast.Raise object at 0x7da1b0e61480>
variable[lat] assign[=] call[name[helpers].checklat, parameter[name[lat]]]
if compare[name[source] equal[==] name[dest]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b0e628c0>, <ast.Name object at 0x7da1b0e613c0>]]]
return[tuple[[<ast.Name object at 0x7da1b0edaf50>, <ast.Name object at 0x7da1b0edb610>]]] | keyword[def] identifier[convert] ( identifier[self] , identifier[lat] , identifier[lon] , identifier[source] , identifier[dest] , identifier[height] = literal[int] , identifier[datetime] = keyword[None] ,
identifier[precision] = literal[int] , identifier[ssheight] = literal[int] * literal[int] ):
literal[string]
keyword[if] identifier[datetime] keyword[is] keyword[None] keyword[and] ( literal[string] keyword[in] [ identifier[source] , identifier[dest] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[lat] = identifier[helpers] . identifier[checklat] ( identifier[lat] )
keyword[if] identifier[source] == identifier[dest] :
keyword[return] identifier[lat] , identifier[lon]
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] = identifier[self] . identifier[geo2apex] ( identifier[lat] , identifier[lon] , identifier[height] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] = identifier[self] . identifier[geo2qd] ( identifier[lat] , identifier[lon] , identifier[height] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] = identifier[self] . identifier[geo2apex] ( identifier[lat] , identifier[lon] , identifier[height] )
identifier[lon] = identifier[self] . identifier[mlon2mlt] ( identifier[lon] , identifier[datetime] , identifier[ssheight] = identifier[ssheight] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] , identifier[_] = identifier[self] . identifier[apex2geo] ( identifier[lat] , identifier[lon] , identifier[height] , identifier[precision] = identifier[precision] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] = identifier[self] . identifier[apex2qd] ( identifier[lat] , identifier[lon] , identifier[height] = identifier[height] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lon] = identifier[self] . identifier[mlon2mlt] ( identifier[lon] , identifier[datetime] , identifier[ssheight] = identifier[ssheight] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] , identifier[_] = identifier[self] . identifier[qd2geo] ( identifier[lat] , identifier[lon] , identifier[height] , identifier[precision] = identifier[precision] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] = identifier[self] . identifier[qd2apex] ( identifier[lat] , identifier[lon] , identifier[height] = identifier[height] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lat] , identifier[lon] = identifier[self] . identifier[qd2apex] ( identifier[lat] , identifier[lon] , identifier[height] = identifier[height] )
identifier[lon] = identifier[self] . identifier[mlon2mlt] ( identifier[lon] , identifier[datetime] , identifier[ssheight] = identifier[ssheight] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lon] = identifier[self] . identifier[mlt2mlon] ( identifier[lon] , identifier[datetime] , identifier[ssheight] = identifier[ssheight] )
identifier[lat] , identifier[lon] , identifier[_] = identifier[self] . identifier[apex2geo] ( identifier[lat] , identifier[lon] , identifier[height] , identifier[precision] = identifier[precision] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lon] = identifier[self] . identifier[mlt2mlon] ( identifier[lon] , identifier[datetime] , identifier[ssheight] = identifier[ssheight] )
keyword[elif] identifier[source] == literal[string] keyword[and] identifier[dest] == literal[string] :
identifier[lon] = identifier[self] . identifier[mlt2mlon] ( identifier[lon] , identifier[datetime] , identifier[ssheight] = identifier[ssheight] )
identifier[lat] , identifier[lon] = identifier[self] . identifier[apex2qd] ( identifier[lat] , identifier[lon] , identifier[height] = identifier[height] )
keyword[else] :
identifier[estr] = literal[string]
identifier[estr] += literal[string] . identifier[format] ( identifier[source] , identifier[dest] )
keyword[raise] identifier[NotImplementedError] ( identifier[estr] )
keyword[return] identifier[lat] , identifier[lon] | def convert(self, lat, lon, source, dest, height=0, datetime=None, precision=1e-10, ssheight=50 * 6371):
"""Converts between geodetic, modified apex, quasi-dipole and MLT.
Parameters
==========
lat : array_like
Latitude
lon : array_like
Longitude/MLT
source : {'geo', 'apex', 'qd', 'mlt'}
Input coordinate system
dest : {'geo', 'apex', 'qd', 'mlt'}
Output coordinate system
height : array_like, optional
Altitude in km
datetime : :class:`datetime.datetime`
Date and time for MLT conversions (required for MLT conversions)
precision : float, optional
Precision of output (degrees) when converting to geo. A negative
value of this argument produces a low-precision calculation of
geodetic lat/lon based only on their spherical harmonic
representation.
A positive value causes the underlying Fortran routine to iterate
until feeding the output geo lat/lon into geo2qd (APXG2Q) reproduces
the input QD lat/lon to within the specified precision (all
coordinates being converted to geo are converted to QD first and
passed through APXG2Q).
ssheight : float, optional
Altitude in km to use for converting the subsolar point from
geographic to magnetic coordinates. A high altitude is used
to ensure the subsolar point is mapped to high latitudes, which
prevents the South-Atlantic Anomaly (SAA) from influencing the MLT.
Returns
=======
lat : ndarray or float
Converted latitude (if converting to MLT, output latitude is apex)
lat : ndarray or float
Converted longitude/MLT
"""
if datetime is None and 'mlt' in [source, dest]:
raise ValueError('datetime must be given for MLT calculations') # depends on [control=['if'], data=[]]
lat = helpers.checklat(lat)
if source == dest:
return (lat, lon) # depends on [control=['if'], data=[]]
# from geo
elif source == 'geo' and dest == 'apex':
(lat, lon) = self.geo2apex(lat, lon, height) # depends on [control=['if'], data=[]]
elif source == 'geo' and dest == 'qd':
(lat, lon) = self.geo2qd(lat, lon, height) # depends on [control=['if'], data=[]]
elif source == 'geo' and dest == 'mlt':
(lat, lon) = self.geo2apex(lat, lon, height)
lon = self.mlon2mlt(lon, datetime, ssheight=ssheight) # depends on [control=['if'], data=[]]
# from apex
elif source == 'apex' and dest == 'geo':
(lat, lon, _) = self.apex2geo(lat, lon, height, precision=precision) # depends on [control=['if'], data=[]]
elif source == 'apex' and dest == 'qd':
(lat, lon) = self.apex2qd(lat, lon, height=height) # depends on [control=['if'], data=[]]
elif source == 'apex' and dest == 'mlt':
lon = self.mlon2mlt(lon, datetime, ssheight=ssheight) # depends on [control=['if'], data=[]]
# from qd
elif source == 'qd' and dest == 'geo':
(lat, lon, _) = self.qd2geo(lat, lon, height, precision=precision) # depends on [control=['if'], data=[]]
elif source == 'qd' and dest == 'apex':
(lat, lon) = self.qd2apex(lat, lon, height=height) # depends on [control=['if'], data=[]]
elif source == 'qd' and dest == 'mlt':
(lat, lon) = self.qd2apex(lat, lon, height=height)
lon = self.mlon2mlt(lon, datetime, ssheight=ssheight) # depends on [control=['if'], data=[]]
# from mlt (input latitude assumed apex)
elif source == 'mlt' and dest == 'geo':
lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
(lat, lon, _) = self.apex2geo(lat, lon, height, precision=precision) # depends on [control=['if'], data=[]]
elif source == 'mlt' and dest == 'apex':
lon = self.mlt2mlon(lon, datetime, ssheight=ssheight) # depends on [control=['if'], data=[]]
elif source == 'mlt' and dest == 'qd':
lon = self.mlt2mlon(lon, datetime, ssheight=ssheight)
(lat, lon) = self.apex2qd(lat, lon, height=height) # depends on [control=['if'], data=[]]
else:
# no other transformations are implemented
estr = 'Unknown coordinate transformation: '
estr += '{} -> {}'.format(source, dest)
raise NotImplementedError(estr)
return (lat, lon) |
def linkify_h_by_h(self):
"""Link hosts with their parents
:return: None
"""
for host in self:
# The new member list
new_parents = []
for parent in getattr(host, 'parents', []):
parent = parent.strip()
o_parent = self.find_by_name(parent)
if o_parent is not None:
new_parents.append(o_parent.uuid)
else:
err = "the parent '%s' for the host '%s' is unknown!" % (parent,
host.get_name())
self.add_error(err)
# We find the id, we replace the names
host.parents = new_parents | def function[linkify_h_by_h, parameter[self]]:
constant[Link hosts with their parents
:return: None
]
for taget[name[host]] in starred[name[self]] begin[:]
variable[new_parents] assign[=] list[[]]
for taget[name[parent]] in starred[call[name[getattr], parameter[name[host], constant[parents], list[[]]]]] begin[:]
variable[parent] assign[=] call[name[parent].strip, parameter[]]
variable[o_parent] assign[=] call[name[self].find_by_name, parameter[name[parent]]]
if compare[name[o_parent] is_not constant[None]] begin[:]
call[name[new_parents].append, parameter[name[o_parent].uuid]]
name[host].parents assign[=] name[new_parents] | keyword[def] identifier[linkify_h_by_h] ( identifier[self] ):
literal[string]
keyword[for] identifier[host] keyword[in] identifier[self] :
identifier[new_parents] =[]
keyword[for] identifier[parent] keyword[in] identifier[getattr] ( identifier[host] , literal[string] ,[]):
identifier[parent] = identifier[parent] . identifier[strip] ()
identifier[o_parent] = identifier[self] . identifier[find_by_name] ( identifier[parent] )
keyword[if] identifier[o_parent] keyword[is] keyword[not] keyword[None] :
identifier[new_parents] . identifier[append] ( identifier[o_parent] . identifier[uuid] )
keyword[else] :
identifier[err] = literal[string] %( identifier[parent] ,
identifier[host] . identifier[get_name] ())
identifier[self] . identifier[add_error] ( identifier[err] )
identifier[host] . identifier[parents] = identifier[new_parents] | def linkify_h_by_h(self):
"""Link hosts with their parents
:return: None
"""
for host in self:
# The new member list
new_parents = []
for parent in getattr(host, 'parents', []):
parent = parent.strip()
o_parent = self.find_by_name(parent)
if o_parent is not None:
new_parents.append(o_parent.uuid) # depends on [control=['if'], data=['o_parent']]
else:
err = "the parent '%s' for the host '%s' is unknown!" % (parent, host.get_name())
self.add_error(err) # depends on [control=['for'], data=['parent']]
# We find the id, we replace the names
host.parents = new_parents # depends on [control=['for'], data=['host']] |
def from_genesis_header(cls,
base_db: BaseDB,
genesis_header: BlockHeader) -> 'BaseHeaderChain':
"""
Initializes the chain from the genesis header.
"""
headerdb = cls.get_headerdb_class()(cast(BaseAtomicDB, base_db))
headerdb.persist_header(genesis_header)
return cls(base_db, genesis_header) | def function[from_genesis_header, parameter[cls, base_db, genesis_header]]:
constant[
Initializes the chain from the genesis header.
]
variable[headerdb] assign[=] call[call[name[cls].get_headerdb_class, parameter[]], parameter[call[name[cast], parameter[name[BaseAtomicDB], name[base_db]]]]]
call[name[headerdb].persist_header, parameter[name[genesis_header]]]
return[call[name[cls], parameter[name[base_db], name[genesis_header]]]] | keyword[def] identifier[from_genesis_header] ( identifier[cls] ,
identifier[base_db] : identifier[BaseDB] ,
identifier[genesis_header] : identifier[BlockHeader] )-> literal[string] :
literal[string]
identifier[headerdb] = identifier[cls] . identifier[get_headerdb_class] ()( identifier[cast] ( identifier[BaseAtomicDB] , identifier[base_db] ))
identifier[headerdb] . identifier[persist_header] ( identifier[genesis_header] )
keyword[return] identifier[cls] ( identifier[base_db] , identifier[genesis_header] ) | def from_genesis_header(cls, base_db: BaseDB, genesis_header: BlockHeader) -> 'BaseHeaderChain':
"""
Initializes the chain from the genesis header.
"""
headerdb = cls.get_headerdb_class()(cast(BaseAtomicDB, base_db))
headerdb.persist_header(genesis_header)
return cls(base_db, genesis_header) |
def typed_returnvalue(self, type_name, formatter=None):
"""Add type information to the return value of this function.
Args:
type_name (str): The name of the type of the return value.
formatter (str): An optional name of a formatting function specified
for the type given in type_name.
"""
self.return_info = ReturnInfo(type_name, formatter, True, None) | def function[typed_returnvalue, parameter[self, type_name, formatter]]:
constant[Add type information to the return value of this function.
Args:
type_name (str): The name of the type of the return value.
formatter (str): An optional name of a formatting function specified
for the type given in type_name.
]
name[self].return_info assign[=] call[name[ReturnInfo], parameter[name[type_name], name[formatter], constant[True], constant[None]]] | keyword[def] identifier[typed_returnvalue] ( identifier[self] , identifier[type_name] , identifier[formatter] = keyword[None] ):
literal[string]
identifier[self] . identifier[return_info] = identifier[ReturnInfo] ( identifier[type_name] , identifier[formatter] , keyword[True] , keyword[None] ) | def typed_returnvalue(self, type_name, formatter=None):
"""Add type information to the return value of this function.
Args:
type_name (str): The name of the type of the return value.
formatter (str): An optional name of a formatting function specified
for the type given in type_name.
"""
self.return_info = ReturnInfo(type_name, formatter, True, None) |
def get_bios_settings_result(self):
"""Gets the result of the bios settings applied
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
try:
settings_result = sushy_system.bios_settings.messages
except sushy.exceptions.SushyError as e:
msg = (self._('The BIOS Settings results were not found. Error '
'%(error)s') %
{'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
status = "failed" if len(settings_result) > 1 else "success"
return {"status": status, "results": settings_result} | def function[get_bios_settings_result, parameter[self]]:
constant[Gets the result of the bios settings applied
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server.
]
variable[sushy_system] assign[=] call[name[self]._get_sushy_system, parameter[name[PROLIANT_SYSTEM_ID]]]
<ast.Try object at 0x7da1b197ca60>
variable[status] assign[=] <ast.IfExp object at 0x7da1b197fee0>
return[dictionary[[<ast.Constant object at 0x7da1b197f430>, <ast.Constant object at 0x7da1b197d7e0>], [<ast.Name object at 0x7da1b197d150>, <ast.Name object at 0x7da1b197d030>]]] | keyword[def] identifier[get_bios_settings_result] ( identifier[self] ):
literal[string]
identifier[sushy_system] = identifier[self] . identifier[_get_sushy_system] ( identifier[PROLIANT_SYSTEM_ID] )
keyword[try] :
identifier[settings_result] = identifier[sushy_system] . identifier[bios_settings] . identifier[messages]
keyword[except] identifier[sushy] . identifier[exceptions] . identifier[SushyError] keyword[as] identifier[e] :
identifier[msg] =( identifier[self] . identifier[_] ( literal[string]
literal[string] )%
{ literal[string] : identifier[str] ( identifier[e] )})
identifier[LOG] . identifier[debug] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[IloError] ( identifier[msg] )
identifier[status] = literal[string] keyword[if] identifier[len] ( identifier[settings_result] )> literal[int] keyword[else] literal[string]
keyword[return] { literal[string] : identifier[status] , literal[string] : identifier[settings_result] } | def get_bios_settings_result(self):
"""Gets the result of the bios settings applied
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
try:
settings_result = sushy_system.bios_settings.messages # depends on [control=['try'], data=[]]
except sushy.exceptions.SushyError as e:
msg = self._('The BIOS Settings results were not found. Error %(error)s') % {'error': str(e)}
LOG.debug(msg)
raise exception.IloError(msg) # depends on [control=['except'], data=['e']]
status = 'failed' if len(settings_result) > 1 else 'success'
return {'status': status, 'results': settings_result} |
def ensure_request(request):
"""Used for requests.request / Requests.request with **ensure_request(request)
:param request: dict or curl-string or url
>>> from torequests.utils import ensure_request
>>> ensure_request('''curl http://test.com''')
{'url': 'http://test.com', 'method': 'get'}
>>> ensure_request('http://test.com')
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'method': 'get', 'url': 'http://test.com'})
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'url': 'http://test.com'})
{'url': 'http://test.com', 'method': 'get'}
"""
if isinstance(request, dict):
result = request
elif isinstance(request, (unicode, str)):
request = request.strip()
if request.startswith("http"):
result = {"method": "get", "url": request}
elif request.startswith("curl "):
result = curlparse(request)
else:
raise ValueError("request should be dict or str.")
result["method"] = result.setdefault("method", "get").lower()
return result | def function[ensure_request, parameter[request]]:
constant[Used for requests.request / Requests.request with **ensure_request(request)
:param request: dict or curl-string or url
>>> from torequests.utils import ensure_request
>>> ensure_request('''curl http://test.com''')
{'url': 'http://test.com', 'method': 'get'}
>>> ensure_request('http://test.com')
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'method': 'get', 'url': 'http://test.com'})
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'url': 'http://test.com'})
{'url': 'http://test.com', 'method': 'get'}
]
if call[name[isinstance], parameter[name[request], name[dict]]] begin[:]
variable[result] assign[=] name[request]
call[name[result]][constant[method]] assign[=] call[call[name[result].setdefault, parameter[constant[method], constant[get]]].lower, parameter[]]
return[name[result]] | keyword[def] identifier[ensure_request] ( identifier[request] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[request] , identifier[dict] ):
identifier[result] = identifier[request]
keyword[elif] identifier[isinstance] ( identifier[request] ,( identifier[unicode] , identifier[str] )):
identifier[request] = identifier[request] . identifier[strip] ()
keyword[if] identifier[request] . identifier[startswith] ( literal[string] ):
identifier[result] ={ literal[string] : literal[string] , literal[string] : identifier[request] }
keyword[elif] identifier[request] . identifier[startswith] ( literal[string] ):
identifier[result] = identifier[curlparse] ( identifier[request] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[result] [ literal[string] ]= identifier[result] . identifier[setdefault] ( literal[string] , literal[string] ). identifier[lower] ()
keyword[return] identifier[result] | def ensure_request(request):
"""Used for requests.request / Requests.request with **ensure_request(request)
:param request: dict or curl-string or url
>>> from torequests.utils import ensure_request
>>> ensure_request('''curl http://test.com''')
{'url': 'http://test.com', 'method': 'get'}
>>> ensure_request('http://test.com')
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'method': 'get', 'url': 'http://test.com'})
{'method': 'get', 'url': 'http://test.com'}
>>> ensure_request({'url': 'http://test.com'})
{'url': 'http://test.com', 'method': 'get'}
"""
if isinstance(request, dict):
result = request # depends on [control=['if'], data=[]]
elif isinstance(request, (unicode, str)):
request = request.strip()
if request.startswith('http'):
result = {'method': 'get', 'url': request} # depends on [control=['if'], data=[]]
elif request.startswith('curl '):
result = curlparse(request) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('request should be dict or str.')
result['method'] = result.setdefault('method', 'get').lower()
return result |
def bhp2pascal(bhp, cfm, fan_tot_eff):
"""return inputs for E+ in pascal and m3/s"""
inh2o = bhp * 6356.0 * fan_tot_eff / cfm
pascal = inh2o2pascal(inh2o)
m3s = cfm2m3s(cfm)
return pascal, m3s | def function[bhp2pascal, parameter[bhp, cfm, fan_tot_eff]]:
constant[return inputs for E+ in pascal and m3/s]
variable[inh2o] assign[=] binary_operation[binary_operation[binary_operation[name[bhp] * constant[6356.0]] * name[fan_tot_eff]] / name[cfm]]
variable[pascal] assign[=] call[name[inh2o2pascal], parameter[name[inh2o]]]
variable[m3s] assign[=] call[name[cfm2m3s], parameter[name[cfm]]]
return[tuple[[<ast.Name object at 0x7da18f00e620>, <ast.Name object at 0x7da18f00c340>]]] | keyword[def] identifier[bhp2pascal] ( identifier[bhp] , identifier[cfm] , identifier[fan_tot_eff] ):
literal[string]
identifier[inh2o] = identifier[bhp] * literal[int] * identifier[fan_tot_eff] / identifier[cfm]
identifier[pascal] = identifier[inh2o2pascal] ( identifier[inh2o] )
identifier[m3s] = identifier[cfm2m3s] ( identifier[cfm] )
keyword[return] identifier[pascal] , identifier[m3s] | def bhp2pascal(bhp, cfm, fan_tot_eff):
"""return inputs for E+ in pascal and m3/s"""
inh2o = bhp * 6356.0 * fan_tot_eff / cfm
pascal = inh2o2pascal(inh2o)
m3s = cfm2m3s(cfm)
return (pascal, m3s) |
def kl_prep(self,mlt_df):
""" prepare KL based parameterizations
Parameters
----------
mlt_df : pandas.DataFrame
a dataframe with multiplier array information
Note
----
calls pyemu.helpers.setup_kl()
"""
if len(self.kl_props) == 0:
return
if self.kl_geostruct is None:
self.logger.warn("kl_geostruct is None,"\
" using ExpVario with contribution=1 and a=(10.0*max(delr,delc))")
kl_dist = 10.0 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0,a=kl_dist)
self.kl_geostruct = pyemu.geostats.GeoStruct(variograms=v)
kl_df = mlt_df.loc[mlt_df.suffix==self.kl_suffix,:]
layers = kl_df.layer.unique()
#kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
#for i,l in enumerate(layers):
# p = set(kl_dict[l])
# for ll in layers[i+1:]:
# pp = set(kl_dict[ll])
# d = pp - p
# kl_dict[ll] = list(d)
kl_prefix = list(kl_df.loc[:,"prefix"])
kl_array_file = {p:m for p,m in zip(kl_df.prefix,kl_df.mlt_file)}
self.logger.statement("kl_prefix: {0}".format(str(kl_prefix)))
fac_file = os.path.join(self.m.model_ws, "kl.fac")
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_df = kl_setup(self.kl_num_eig,self.m.sr,self.kl_geostruct,kl_prefix,
factors_file=fac_file,basis_file=fac_file+".basis.jcb",
tpl_dir=self.m.model_ws)
self.logger.statement("{0} kl parameters created".
format(kl_df.shape[0]))
self.logger.statement("kl 'pargp':{0}".
format(','.join(kl_df.pargp.unique())))
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_mlt_df = mlt_df.loc[mlt_df.suffix==self.kl_suffix]
for prefix in kl_df.prefix.unique():
prefix_df = kl_df.loc[kl_df.prefix==prefix,:]
in_file = os.path.split(prefix_df.loc[:,"in_file"].iloc[0])[-1]
assert prefix in mlt_df.prefix.values,"{0}:{1}".format(prefix,mlt_df.prefix)
mlt_df.loc[mlt_df.prefix==prefix,"pp_file"] = in_file
mlt_df.loc[mlt_df.prefix==prefix,"fac_file"] = os.path.split(fac_file)[-1]
print(kl_mlt_df)
mlt_df.loc[mlt_df.suffix == self.kl_suffix, "tpl_file"] = np.NaN
self.par_dfs[self.kl_suffix] = kl_df | def function[kl_prep, parameter[self, mlt_df]]:
constant[ prepare KL based parameterizations
Parameters
----------
mlt_df : pandas.DataFrame
a dataframe with multiplier array information
Note
----
calls pyemu.helpers.setup_kl()
]
if compare[call[name[len], parameter[name[self].kl_props]] equal[==] constant[0]] begin[:]
return[None]
if compare[name[self].kl_geostruct is constant[None]] begin[:]
call[name[self].logger.warn, parameter[constant[kl_geostruct is None, using ExpVario with contribution=1 and a=(10.0*max(delr,delc))]]]
variable[kl_dist] assign[=] binary_operation[constant[10.0] * call[name[float], parameter[call[name[max], parameter[call[name[self].m.dis.delr.array.max, parameter[]], call[name[self].m.dis.delc.array.max, parameter[]]]]]]]
variable[v] assign[=] call[name[pyemu].geostats.ExpVario, parameter[]]
name[self].kl_geostruct assign[=] call[name[pyemu].geostats.GeoStruct, parameter[]]
variable[kl_df] assign[=] call[name[mlt_df].loc][tuple[[<ast.Compare object at 0x7da1b1d95600>, <ast.Slice object at 0x7da1b1d958a0>]]]
variable[layers] assign[=] call[name[kl_df].layer.unique, parameter[]]
variable[kl_prefix] assign[=] call[name[list], parameter[call[name[kl_df].loc][tuple[[<ast.Slice object at 0x7da1b1d94520>, <ast.Constant object at 0x7da1b1d94640>]]]]]
variable[kl_array_file] assign[=] <ast.DictComp object at 0x7da1b1d944f0>
call[name[self].logger.statement, parameter[call[constant[kl_prefix: {0}].format, parameter[call[name[str], parameter[name[kl_prefix]]]]]]]
variable[fac_file] assign[=] call[name[os].path.join, parameter[name[self].m.model_ws, constant[kl.fac]]]
call[name[self].log, parameter[call[constant[calling kl_setup() with factors file {0}].format, parameter[name[fac_file]]]]]
variable[kl_df] assign[=] call[name[kl_setup], parameter[name[self].kl_num_eig, name[self].m.sr, name[self].kl_geostruct, name[kl_prefix]]]
call[name[self].logger.statement, parameter[call[constant[{0} kl parameters created].format, parameter[call[name[kl_df].shape][constant[0]]]]]]
call[name[self].logger.statement, parameter[call[constant[kl 'pargp':{0}].format, parameter[call[constant[,].join, parameter[call[name[kl_df].pargp.unique, parameter[]]]]]]]]
call[name[self].log, parameter[call[constant[calling kl_setup() with factors file {0}].format, parameter[name[fac_file]]]]]
variable[kl_mlt_df] assign[=] call[name[mlt_df].loc][compare[name[mlt_df].suffix equal[==] name[self].kl_suffix]]
for taget[name[prefix]] in starred[call[name[kl_df].prefix.unique, parameter[]]] begin[:]
variable[prefix_df] assign[=] call[name[kl_df].loc][tuple[[<ast.Compare object at 0x7da1b1d94ac0>, <ast.Slice object at 0x7da1b1d95de0>]]]
variable[in_file] assign[=] call[call[name[os].path.split, parameter[call[call[name[prefix_df].loc][tuple[[<ast.Slice object at 0x7da1b1d94700>, <ast.Constant object at 0x7da1b1d95510>]]].iloc][constant[0]]]]][<ast.UnaryOp object at 0x7da1b1d966b0>]
assert[compare[name[prefix] in name[mlt_df].prefix.values]]
call[name[mlt_df].loc][tuple[[<ast.Compare object at 0x7da1b1d47d30>, <ast.Constant object at 0x7da1b1d47c70>]]] assign[=] name[in_file]
call[name[mlt_df].loc][tuple[[<ast.Compare object at 0x7da1b1d47b20>, <ast.Constant object at 0x7da1b1d47a60>]]] assign[=] call[call[name[os].path.split, parameter[name[fac_file]]]][<ast.UnaryOp object at 0x7da1b1d47910>]
call[name[print], parameter[name[kl_mlt_df]]]
call[name[mlt_df].loc][tuple[[<ast.Compare object at 0x7da1b1d476d0>, <ast.Constant object at 0x7da1b1d475e0>]]] assign[=] name[np].NaN
call[name[self].par_dfs][name[self].kl_suffix] assign[=] name[kl_df] | keyword[def] identifier[kl_prep] ( identifier[self] , identifier[mlt_df] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[kl_props] )== literal[int] :
keyword[return]
keyword[if] identifier[self] . identifier[kl_geostruct] keyword[is] keyword[None] :
identifier[self] . identifier[logger] . identifier[warn] ( literal[string] literal[string] )
identifier[kl_dist] = literal[int] * identifier[float] ( identifier[max] ( identifier[self] . identifier[m] . identifier[dis] . identifier[delr] . identifier[array] . identifier[max] (),
identifier[self] . identifier[m] . identifier[dis] . identifier[delc] . identifier[array] . identifier[max] ()))
identifier[v] = identifier[pyemu] . identifier[geostats] . identifier[ExpVario] ( identifier[contribution] = literal[int] , identifier[a] = identifier[kl_dist] )
identifier[self] . identifier[kl_geostruct] = identifier[pyemu] . identifier[geostats] . identifier[GeoStruct] ( identifier[variograms] = identifier[v] )
identifier[kl_df] = identifier[mlt_df] . identifier[loc] [ identifier[mlt_df] . identifier[suffix] == identifier[self] . identifier[kl_suffix] ,:]
identifier[layers] = identifier[kl_df] . identifier[layer] . identifier[unique] ()
identifier[kl_prefix] = identifier[list] ( identifier[kl_df] . identifier[loc] [:, literal[string] ])
identifier[kl_array_file] ={ identifier[p] : identifier[m] keyword[for] identifier[p] , identifier[m] keyword[in] identifier[zip] ( identifier[kl_df] . identifier[prefix] , identifier[kl_df] . identifier[mlt_file] )}
identifier[self] . identifier[logger] . identifier[statement] ( literal[string] . identifier[format] ( identifier[str] ( identifier[kl_prefix] )))
identifier[fac_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[m] . identifier[model_ws] , literal[string] )
identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[fac_file] ))
identifier[kl_df] = identifier[kl_setup] ( identifier[self] . identifier[kl_num_eig] , identifier[self] . identifier[m] . identifier[sr] , identifier[self] . identifier[kl_geostruct] , identifier[kl_prefix] ,
identifier[factors_file] = identifier[fac_file] , identifier[basis_file] = identifier[fac_file] + literal[string] ,
identifier[tpl_dir] = identifier[self] . identifier[m] . identifier[model_ws] )
identifier[self] . identifier[logger] . identifier[statement] ( literal[string] .
identifier[format] ( identifier[kl_df] . identifier[shape] [ literal[int] ]))
identifier[self] . identifier[logger] . identifier[statement] ( literal[string] .
identifier[format] ( literal[string] . identifier[join] ( identifier[kl_df] . identifier[pargp] . identifier[unique] ())))
identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[fac_file] ))
identifier[kl_mlt_df] = identifier[mlt_df] . identifier[loc] [ identifier[mlt_df] . identifier[suffix] == identifier[self] . identifier[kl_suffix] ]
keyword[for] identifier[prefix] keyword[in] identifier[kl_df] . identifier[prefix] . identifier[unique] ():
identifier[prefix_df] = identifier[kl_df] . identifier[loc] [ identifier[kl_df] . identifier[prefix] == identifier[prefix] ,:]
identifier[in_file] = identifier[os] . identifier[path] . identifier[split] ( identifier[prefix_df] . identifier[loc] [:, literal[string] ]. identifier[iloc] [ literal[int] ])[- literal[int] ]
keyword[assert] identifier[prefix] keyword[in] identifier[mlt_df] . identifier[prefix] . identifier[values] , literal[string] . identifier[format] ( identifier[prefix] , identifier[mlt_df] . identifier[prefix] )
identifier[mlt_df] . identifier[loc] [ identifier[mlt_df] . identifier[prefix] == identifier[prefix] , literal[string] ]= identifier[in_file]
identifier[mlt_df] . identifier[loc] [ identifier[mlt_df] . identifier[prefix] == identifier[prefix] , literal[string] ]= identifier[os] . identifier[path] . identifier[split] ( identifier[fac_file] )[- literal[int] ]
identifier[print] ( identifier[kl_mlt_df] )
identifier[mlt_df] . identifier[loc] [ identifier[mlt_df] . identifier[suffix] == identifier[self] . identifier[kl_suffix] , literal[string] ]= identifier[np] . identifier[NaN]
identifier[self] . identifier[par_dfs] [ identifier[self] . identifier[kl_suffix] ]= identifier[kl_df] | def kl_prep(self, mlt_df):
""" prepare KL based parameterizations
Parameters
----------
mlt_df : pandas.DataFrame
a dataframe with multiplier array information
Note
----
calls pyemu.helpers.setup_kl()
"""
if len(self.kl_props) == 0:
return # depends on [control=['if'], data=[]]
if self.kl_geostruct is None:
self.logger.warn('kl_geostruct is None, using ExpVario with contribution=1 and a=(10.0*max(delr,delc))')
kl_dist = 10.0 * float(max(self.m.dis.delr.array.max(), self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=kl_dist)
self.kl_geostruct = pyemu.geostats.GeoStruct(variograms=v) # depends on [control=['if'], data=[]]
kl_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix, :]
layers = kl_df.layer.unique()
#kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
#for i,l in enumerate(layers):
# p = set(kl_dict[l])
# for ll in layers[i+1:]:
# pp = set(kl_dict[ll])
# d = pp - p
# kl_dict[ll] = list(d)
kl_prefix = list(kl_df.loc[:, 'prefix'])
kl_array_file = {p: m for (p, m) in zip(kl_df.prefix, kl_df.mlt_file)}
self.logger.statement('kl_prefix: {0}'.format(str(kl_prefix)))
fac_file = os.path.join(self.m.model_ws, 'kl.fac')
self.log('calling kl_setup() with factors file {0}'.format(fac_file))
kl_df = kl_setup(self.kl_num_eig, self.m.sr, self.kl_geostruct, kl_prefix, factors_file=fac_file, basis_file=fac_file + '.basis.jcb', tpl_dir=self.m.model_ws)
self.logger.statement('{0} kl parameters created'.format(kl_df.shape[0]))
self.logger.statement("kl 'pargp':{0}".format(','.join(kl_df.pargp.unique())))
self.log('calling kl_setup() with factors file {0}'.format(fac_file))
kl_mlt_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix]
for prefix in kl_df.prefix.unique():
prefix_df = kl_df.loc[kl_df.prefix == prefix, :]
in_file = os.path.split(prefix_df.loc[:, 'in_file'].iloc[0])[-1]
assert prefix in mlt_df.prefix.values, '{0}:{1}'.format(prefix, mlt_df.prefix)
mlt_df.loc[mlt_df.prefix == prefix, 'pp_file'] = in_file
mlt_df.loc[mlt_df.prefix == prefix, 'fac_file'] = os.path.split(fac_file)[-1] # depends on [control=['for'], data=['prefix']]
print(kl_mlt_df)
mlt_df.loc[mlt_df.suffix == self.kl_suffix, 'tpl_file'] = np.NaN
self.par_dfs[self.kl_suffix] = kl_df |
def hint_for_accuracy(self, accuracy="normal"):
"""
Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
pawecutdg [Ha] for the given accuracy.
ecut and pawecutdg are set to zero if no hint is available.
Args:
accuracy: ["low", "normal", "high"]
"""
if not self.has_dojo_report:
return Hint(ecut=0., pawecutdg=0.)
# Get hints from dojoreport. Try first in hints then in ppgen_hints.
if "hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["hints"][accuracy])
elif "ppgen_hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["ppgen_hints"][accuracy])
return Hint(ecut=0., pawecutdg=0.) | def function[hint_for_accuracy, parameter[self, accuracy]]:
constant[
Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
pawecutdg [Ha] for the given accuracy.
ecut and pawecutdg are set to zero if no hint is available.
Args:
accuracy: ["low", "normal", "high"]
]
if <ast.UnaryOp object at 0x7da2047ea740> begin[:]
return[call[name[Hint], parameter[]]]
if compare[constant[hints] in name[self].dojo_report] begin[:]
return[call[name[Hint].from_dict, parameter[call[call[name[self].dojo_report][constant[hints]]][name[accuracy]]]]]
return[call[name[Hint], parameter[]]] | keyword[def] identifier[hint_for_accuracy] ( identifier[self] , identifier[accuracy] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_dojo_report] :
keyword[return] identifier[Hint] ( identifier[ecut] = literal[int] , identifier[pawecutdg] = literal[int] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[dojo_report] :
keyword[return] identifier[Hint] . identifier[from_dict] ( identifier[self] . identifier[dojo_report] [ literal[string] ][ identifier[accuracy] ])
keyword[elif] literal[string] keyword[in] identifier[self] . identifier[dojo_report] :
keyword[return] identifier[Hint] . identifier[from_dict] ( identifier[self] . identifier[dojo_report] [ literal[string] ][ identifier[accuracy] ])
keyword[return] identifier[Hint] ( identifier[ecut] = literal[int] , identifier[pawecutdg] = literal[int] ) | def hint_for_accuracy(self, accuracy='normal'):
"""
Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
pawecutdg [Ha] for the given accuracy.
ecut and pawecutdg are set to zero if no hint is available.
Args:
accuracy: ["low", "normal", "high"]
"""
if not self.has_dojo_report:
return Hint(ecut=0.0, pawecutdg=0.0) # depends on [control=['if'], data=[]]
# Get hints from dojoreport. Try first in hints then in ppgen_hints.
if 'hints' in self.dojo_report:
return Hint.from_dict(self.dojo_report['hints'][accuracy]) # depends on [control=['if'], data=[]]
elif 'ppgen_hints' in self.dojo_report:
return Hint.from_dict(self.dojo_report['ppgen_hints'][accuracy]) # depends on [control=['if'], data=[]]
return Hint(ecut=0.0, pawecutdg=0.0) |
def get_endpoint_server_root(self):
"""Parses RemoteLRS object's endpoint and returns its root
:return: Root of the RemoteLRS object endpoint
:rtype: unicode
"""
parsed = urlparse(self._endpoint)
root = parsed.scheme + "://" + parsed.hostname
if parsed.port is not None:
root += ":" + unicode(parsed.port)
return root | def function[get_endpoint_server_root, parameter[self]]:
constant[Parses RemoteLRS object's endpoint and returns its root
:return: Root of the RemoteLRS object endpoint
:rtype: unicode
]
variable[parsed] assign[=] call[name[urlparse], parameter[name[self]._endpoint]]
variable[root] assign[=] binary_operation[binary_operation[name[parsed].scheme + constant[://]] + name[parsed].hostname]
if compare[name[parsed].port is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b0c65c30>
return[name[root]] | keyword[def] identifier[get_endpoint_server_root] ( identifier[self] ):
literal[string]
identifier[parsed] = identifier[urlparse] ( identifier[self] . identifier[_endpoint] )
identifier[root] = identifier[parsed] . identifier[scheme] + literal[string] + identifier[parsed] . identifier[hostname]
keyword[if] identifier[parsed] . identifier[port] keyword[is] keyword[not] keyword[None] :
identifier[root] += literal[string] + identifier[unicode] ( identifier[parsed] . identifier[port] )
keyword[return] identifier[root] | def get_endpoint_server_root(self):
"""Parses RemoteLRS object's endpoint and returns its root
:return: Root of the RemoteLRS object endpoint
:rtype: unicode
"""
parsed = urlparse(self._endpoint)
root = parsed.scheme + '://' + parsed.hostname
if parsed.port is not None:
root += ':' + unicode(parsed.port) # depends on [control=['if'], data=[]]
return root |
def get_1D_overlap(eclusters, depth=1):
"""
Find blocks that are 1D overlapping,
returns cliques of block ids that are in conflict
"""
overlap_set = set()
active = set()
ends = []
for i, (chr, left, right) in enumerate(eclusters):
ends.append((chr, left, 0, i)) # 0/1 for left/right-ness
ends.append((chr, right, 1, i))
ends.sort()
chr_last = ""
for chr, pos, left_right, i in ends:
if chr != chr_last:
active.clear()
if left_right == 0:
active.add(i)
else:
active.remove(i)
if len(active) > depth:
overlap_set.add(tuple(sorted(active)))
chr_last = chr
return overlap_set | def function[get_1D_overlap, parameter[eclusters, depth]]:
constant[
Find blocks that are 1D overlapping,
returns cliques of block ids that are in conflict
]
variable[overlap_set] assign[=] call[name[set], parameter[]]
variable[active] assign[=] call[name[set], parameter[]]
variable[ends] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f00c430>, <ast.Tuple object at 0x7da18f00cdf0>]]] in starred[call[name[enumerate], parameter[name[eclusters]]]] begin[:]
call[name[ends].append, parameter[tuple[[<ast.Name object at 0x7da18f00e7a0>, <ast.Name object at 0x7da18f00ffa0>, <ast.Constant object at 0x7da18f00e920>, <ast.Name object at 0x7da18f00f5b0>]]]]
call[name[ends].append, parameter[tuple[[<ast.Name object at 0x7da18f00cc70>, <ast.Name object at 0x7da18f00f8b0>, <ast.Constant object at 0x7da18f00c610>, <ast.Name object at 0x7da18f00ccd0>]]]]
call[name[ends].sort, parameter[]]
variable[chr_last] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da18f00e530>, <ast.Name object at 0x7da18f00cb50>, <ast.Name object at 0x7da18f00ec80>, <ast.Name object at 0x7da18f00e170>]]] in starred[name[ends]] begin[:]
if compare[name[chr] not_equal[!=] name[chr_last]] begin[:]
call[name[active].clear, parameter[]]
if compare[name[left_right] equal[==] constant[0]] begin[:]
call[name[active].add, parameter[name[i]]]
if compare[call[name[len], parameter[name[active]]] greater[>] name[depth]] begin[:]
call[name[overlap_set].add, parameter[call[name[tuple], parameter[call[name[sorted], parameter[name[active]]]]]]]
variable[chr_last] assign[=] name[chr]
return[name[overlap_set]] | keyword[def] identifier[get_1D_overlap] ( identifier[eclusters] , identifier[depth] = literal[int] ):
literal[string]
identifier[overlap_set] = identifier[set] ()
identifier[active] = identifier[set] ()
identifier[ends] =[]
keyword[for] identifier[i] ,( identifier[chr] , identifier[left] , identifier[right] ) keyword[in] identifier[enumerate] ( identifier[eclusters] ):
identifier[ends] . identifier[append] (( identifier[chr] , identifier[left] , literal[int] , identifier[i] ))
identifier[ends] . identifier[append] (( identifier[chr] , identifier[right] , literal[int] , identifier[i] ))
identifier[ends] . identifier[sort] ()
identifier[chr_last] = literal[string]
keyword[for] identifier[chr] , identifier[pos] , identifier[left_right] , identifier[i] keyword[in] identifier[ends] :
keyword[if] identifier[chr] != identifier[chr_last] :
identifier[active] . identifier[clear] ()
keyword[if] identifier[left_right] == literal[int] :
identifier[active] . identifier[add] ( identifier[i] )
keyword[else] :
identifier[active] . identifier[remove] ( identifier[i] )
keyword[if] identifier[len] ( identifier[active] )> identifier[depth] :
identifier[overlap_set] . identifier[add] ( identifier[tuple] ( identifier[sorted] ( identifier[active] )))
identifier[chr_last] = identifier[chr]
keyword[return] identifier[overlap_set] | def get_1D_overlap(eclusters, depth=1):
"""
Find blocks that are 1D overlapping,
returns cliques of block ids that are in conflict
"""
overlap_set = set()
active = set()
ends = []
for (i, (chr, left, right)) in enumerate(eclusters):
ends.append((chr, left, 0, i)) # 0/1 for left/right-ness
ends.append((chr, right, 1, i)) # depends on [control=['for'], data=[]]
ends.sort()
chr_last = ''
for (chr, pos, left_right, i) in ends:
if chr != chr_last:
active.clear() # depends on [control=['if'], data=[]]
if left_right == 0:
active.add(i) # depends on [control=['if'], data=[]]
else:
active.remove(i)
if len(active) > depth:
overlap_set.add(tuple(sorted(active))) # depends on [control=['if'], data=[]]
chr_last = chr # depends on [control=['for'], data=[]]
return overlap_set |
def _relation_module(role, interface):
"""
Return module for relation based on its role and interface, or None.
Prefers new location (reactive/relations) over old (hooks/relations).
"""
_append_path(hookenv.charm_dir())
_append_path(os.path.join(hookenv.charm_dir(), 'hooks'))
base_module = 'relations.{}.{}'.format(interface, role)
for module in ('reactive.{}'.format(base_module), base_module):
if module in sys.modules:
break
try:
importlib.import_module(module)
break
except ImportError:
continue
else:
hookenv.log('Unable to find implementation for relation: '
'{} of {}'.format(role, interface), hookenv.ERROR)
return None
return sys.modules[module] | def function[_relation_module, parameter[role, interface]]:
constant[
Return module for relation based on its role and interface, or None.
Prefers new location (reactive/relations) over old (hooks/relations).
]
call[name[_append_path], parameter[call[name[hookenv].charm_dir, parameter[]]]]
call[name[_append_path], parameter[call[name[os].path.join, parameter[call[name[hookenv].charm_dir, parameter[]], constant[hooks]]]]]
variable[base_module] assign[=] call[constant[relations.{}.{}].format, parameter[name[interface], name[role]]]
for taget[name[module]] in starred[tuple[[<ast.Call object at 0x7da1b1b9c310>, <ast.Name object at 0x7da1b1b9fe80>]]] begin[:]
if compare[name[module] in name[sys].modules] begin[:]
break
<ast.Try object at 0x7da1b1b9c9a0>
return[call[name[sys].modules][name[module]]] | keyword[def] identifier[_relation_module] ( identifier[role] , identifier[interface] ):
literal[string]
identifier[_append_path] ( identifier[hookenv] . identifier[charm_dir] ())
identifier[_append_path] ( identifier[os] . identifier[path] . identifier[join] ( identifier[hookenv] . identifier[charm_dir] (), literal[string] ))
identifier[base_module] = literal[string] . identifier[format] ( identifier[interface] , identifier[role] )
keyword[for] identifier[module] keyword[in] ( literal[string] . identifier[format] ( identifier[base_module] ), identifier[base_module] ):
keyword[if] identifier[module] keyword[in] identifier[sys] . identifier[modules] :
keyword[break]
keyword[try] :
identifier[importlib] . identifier[import_module] ( identifier[module] )
keyword[break]
keyword[except] identifier[ImportError] :
keyword[continue]
keyword[else] :
identifier[hookenv] . identifier[log] ( literal[string]
literal[string] . identifier[format] ( identifier[role] , identifier[interface] ), identifier[hookenv] . identifier[ERROR] )
keyword[return] keyword[None]
keyword[return] identifier[sys] . identifier[modules] [ identifier[module] ] | def _relation_module(role, interface):
"""
Return module for relation based on its role and interface, or None.
Prefers new location (reactive/relations) over old (hooks/relations).
"""
_append_path(hookenv.charm_dir())
_append_path(os.path.join(hookenv.charm_dir(), 'hooks'))
base_module = 'relations.{}.{}'.format(interface, role)
for module in ('reactive.{}'.format(base_module), base_module):
if module in sys.modules:
break # depends on [control=['if'], data=[]]
try:
importlib.import_module(module)
break # depends on [control=['try'], data=[]]
except ImportError:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['module']]
else:
hookenv.log('Unable to find implementation for relation: {} of {}'.format(role, interface), hookenv.ERROR)
return None
return sys.modules[module] |
def _execute_sql_query(
self):
"""
*execute sql query using the sdss API*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
"""
self.log.info('starting the ``_execute_sql_query`` method')
# generate the api call url
params = urllib.urlencode({'cmd': self.sqlQuery, 'format': "json"})
# grab the results
results = urllib.urlopen(self.sdssUrl + '?%s' % params)
# report any errors
ofp = sys.stdout
results = results.read()
if results.startswith("ERROR"): # SQL Statement Error -> stderr
ofp = sys.stderr
ofp.write(string.rstrip(line) + os.linesep)
# clean up the json response so it can be parsed
results = results.replace(
": ,", ': "NULL",')
regex = re.compile(r'"photoz_err"\:\s*(\n\s*})')
newString = regex.sub('"photoz_err": "NULL"\g<1>', results)
results = newString
# parse the json results
results = json.loads(results)[0]
self.results = results["Rows"]
self.log.info('completed the ``_execute_sql_query`` method')
return | def function[_execute_sql_query, parameter[self]]:
constant[
*execute sql query using the sdss API*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
]
call[name[self].log.info, parameter[constant[starting the ``_execute_sql_query`` method]]]
variable[params] assign[=] call[name[urllib].urlencode, parameter[dictionary[[<ast.Constant object at 0x7da20c7cac20>, <ast.Constant object at 0x7da20c7c9690>], [<ast.Attribute object at 0x7da20c7c9930>, <ast.Constant object at 0x7da20c7c9c30>]]]]
variable[results] assign[=] call[name[urllib].urlopen, parameter[binary_operation[name[self].sdssUrl + binary_operation[constant[?%s] <ast.Mod object at 0x7da2590d6920> name[params]]]]]
variable[ofp] assign[=] name[sys].stdout
variable[results] assign[=] call[name[results].read, parameter[]]
if call[name[results].startswith, parameter[constant[ERROR]]] begin[:]
variable[ofp] assign[=] name[sys].stderr
call[name[ofp].write, parameter[binary_operation[call[name[string].rstrip, parameter[name[line]]] + name[os].linesep]]]
variable[results] assign[=] call[name[results].replace, parameter[constant[: ,], constant[: "NULL",]]]
variable[regex] assign[=] call[name[re].compile, parameter[constant["photoz_err"\:\s*(\n\s*})]]]
variable[newString] assign[=] call[name[regex].sub, parameter[constant["photoz_err": "NULL"\g<1>], name[results]]]
variable[results] assign[=] name[newString]
variable[results] assign[=] call[call[name[json].loads, parameter[name[results]]]][constant[0]]
name[self].results assign[=] call[name[results]][constant[Rows]]
call[name[self].log.info, parameter[constant[completed the ``_execute_sql_query`` method]]]
return[None] | keyword[def] identifier[_execute_sql_query] (
identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
identifier[params] = identifier[urllib] . identifier[urlencode] ({ literal[string] : identifier[self] . identifier[sqlQuery] , literal[string] : literal[string] })
identifier[results] = identifier[urllib] . identifier[urlopen] ( identifier[self] . identifier[sdssUrl] + literal[string] % identifier[params] )
identifier[ofp] = identifier[sys] . identifier[stdout]
identifier[results] = identifier[results] . identifier[read] ()
keyword[if] identifier[results] . identifier[startswith] ( literal[string] ):
identifier[ofp] = identifier[sys] . identifier[stderr]
identifier[ofp] . identifier[write] ( identifier[string] . identifier[rstrip] ( identifier[line] )+ identifier[os] . identifier[linesep] )
identifier[results] = identifier[results] . identifier[replace] (
literal[string] , literal[string] )
identifier[regex] = identifier[re] . identifier[compile] ( literal[string] )
identifier[newString] = identifier[regex] . identifier[sub] ( literal[string] , identifier[results] )
identifier[results] = identifier[newString]
identifier[results] = identifier[json] . identifier[loads] ( identifier[results] )[ literal[int] ]
identifier[self] . identifier[results] = identifier[results] [ literal[string] ]
identifier[self] . identifier[log] . identifier[info] ( literal[string] )
keyword[return] | def _execute_sql_query(self):
"""
*execute sql query using the sdss API*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
"""
self.log.info('starting the ``_execute_sql_query`` method')
# generate the api call url
params = urllib.urlencode({'cmd': self.sqlQuery, 'format': 'json'})
# grab the results
results = urllib.urlopen(self.sdssUrl + '?%s' % params)
# report any errors
ofp = sys.stdout
results = results.read()
if results.startswith('ERROR'): # SQL Statement Error -> stderr
ofp = sys.stderr
ofp.write(string.rstrip(line) + os.linesep) # depends on [control=['if'], data=[]]
# clean up the json response so it can be parsed
results = results.replace(': ,', ': "NULL",')
regex = re.compile('"photoz_err"\\:\\s*(\\n\\s*})')
newString = regex.sub('"photoz_err": "NULL"\\g<1>', results)
results = newString
# parse the json results
results = json.loads(results)[0]
self.results = results['Rows']
self.log.info('completed the ``_execute_sql_query`` method')
return |
def context_loader(self, callback):
"""
Decorate a method that receives a key id and returns an object or dict
that will be available in the request context as g.cavage_context
"""
if not callback or not callable(callback):
raise Exception("Please pass in a callable that loads your context.")
self.context_loader_callback = callback
return callback | def function[context_loader, parameter[self, callback]]:
constant[
Decorate a method that receives a key id and returns an object or dict
that will be available in the request context as g.cavage_context
]
if <ast.BoolOp object at 0x7da204565810> begin[:]
<ast.Raise object at 0x7da20c992b00>
name[self].context_loader_callback assign[=] name[callback]
return[name[callback]] | keyword[def] identifier[context_loader] ( identifier[self] , identifier[callback] ):
literal[string]
keyword[if] keyword[not] identifier[callback] keyword[or] keyword[not] identifier[callable] ( identifier[callback] ):
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[context_loader_callback] = identifier[callback]
keyword[return] identifier[callback] | def context_loader(self, callback):
"""
Decorate a method that receives a key id and returns an object or dict
that will be available in the request context as g.cavage_context
"""
if not callback or not callable(callback):
raise Exception('Please pass in a callable that loads your context.') # depends on [control=['if'], data=[]]
self.context_loader_callback = callback
return callback |
def execute(command, cwd=os.path.curdir, **options):
"""
Run the system command with optional options.
Args:
* command: system command.
* cwd: current working directory.
* verbose: direct options for :func:`subprocess.Popen`.
Returns:
Opened process, standard output & error.
"""
process = subprocess.Popen(shlex.split(command), cwd=cwd, **options)
stdout, stderr = process.communicate()
return process, stdout, stderr | def function[execute, parameter[command, cwd]]:
constant[
Run the system command with optional options.
Args:
* command: system command.
* cwd: current working directory.
* verbose: direct options for :func:`subprocess.Popen`.
Returns:
Opened process, standard output & error.
]
variable[process] assign[=] call[name[subprocess].Popen, parameter[call[name[shlex].split, parameter[name[command]]]]]
<ast.Tuple object at 0x7da1b16bfdc0> assign[=] call[name[process].communicate, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b16be5c0>, <ast.Name object at 0x7da1b16bcfa0>, <ast.Name object at 0x7da1b16becb0>]]] | keyword[def] identifier[execute] ( identifier[command] , identifier[cwd] = identifier[os] . identifier[path] . identifier[curdir] ,** identifier[options] ):
literal[string]
identifier[process] = identifier[subprocess] . identifier[Popen] ( identifier[shlex] . identifier[split] ( identifier[command] ), identifier[cwd] = identifier[cwd] ,** identifier[options] )
identifier[stdout] , identifier[stderr] = identifier[process] . identifier[communicate] ()
keyword[return] identifier[process] , identifier[stdout] , identifier[stderr] | def execute(command, cwd=os.path.curdir, **options):
"""
Run the system command with optional options.
Args:
* command: system command.
* cwd: current working directory.
* verbose: direct options for :func:`subprocess.Popen`.
Returns:
Opened process, standard output & error.
"""
process = subprocess.Popen(shlex.split(command), cwd=cwd, **options)
(stdout, stderr) = process.communicate()
return (process, stdout, stderr) |
def get_variable_by_axis(self, var, axis, coords=None):
"""Return the coordinate matching the specified axis
This method uses to ``'axis'`` attribute in coordinates to return the
corresponding coordinate of the given variable
Possible types
--------------
var: xarray.Variable
The variable to get the dimension for
axis: {'x', 'y', 'z', 't'}
The axis string that identifies the dimension
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The coordinate for `var` that matches the given `axis` or None if
no coordinate with the right `axis` could be found.
Notes
-----
This is a rather low-level function that only interpretes the
CFConvention. It is used by the :meth:`get_x`,
:meth:`get_y`, :meth:`get_z` and :meth:`get_t` methods
Warning
-------
If None of the coordinates have an ``'axis'`` attribute, we use the
``'coordinate'`` attribute of `var` (if existent).
Since however the CF Conventions do not determine the order on how
the coordinates shall be saved, we try to use a pattern matching
for latitude (``'lat'``) and longitude (``lon'``). If this patterns
do not match, we interpret the coordinates such that x: -1, y: -2,
z: -3. This is all not very safe for awkward dimension names,
but works for most cases. If you want to be a hundred percent sure,
use the :attr:`x`, :attr:`y`, :attr:`z` and :attr:`t` attribute.
See Also
--------
get_x, get_y, get_z, get_t"""
axis = axis.lower()
if axis not in list('xyzt'):
raise ValueError("Axis must be one of X, Y, Z, T, not {0}".format(
axis))
# we first check for the dimensions and then for the coordinates
# attribute
coords = coords or self.ds.coords
coord_names = var.attrs.get('coordinates', var.encoding.get(
'coordinates', '')).split()
if not coord_names:
return
ret = []
for coord in map(lambda dim: coords[dim], filter(
lambda dim: dim in coords, chain(
coord_names, var.dims))):
# check for the axis attribute or whether the coordinate is in the
# list of possible coordinate names
if (coord.name not in (c.name for c in ret) and
(coord.attrs.get('axis', '').lower() == axis or
coord.name in getattr(self, axis))):
ret.append(coord)
if ret:
return None if len(ret) > 1 else ret[0]
# If the coordinates attribute is specified but the coordinate
# variables themselves have no 'axis' attribute, we interpret the
# coordinates such that x: -1, y: -2, z: -3
# Since however the CF Conventions do not determine the order on how
# the coordinates shall be saved, we try to use a pattern matching
# for latitude and longitude. This is not very nice, hence it is
# better to specify the :attr:`x` and :attr:`y` attribute
tnames = self.t.intersection(coord_names)
if axis == 'x':
for cname in filter(lambda cname: re.search('lon', cname),
coord_names):
return coords[cname]
return coords.get(coord_names[-1])
elif axis == 'y' and len(coord_names) >= 2:
for cname in filter(lambda cname: re.search('lat', cname),
coord_names):
return coords[cname]
return coords.get(coord_names[-2])
elif (axis == 'z' and len(coord_names) >= 3 and
coord_names[-3] not in tnames):
return coords.get(coord_names[-3])
elif axis == 't' and tnames:
tname = next(iter(tnames))
if len(tnames) > 1:
warn("Found multiple matches for time coordinate in the "
"coordinates: %s. I use %s" % (', '.join(tnames), tname),
PsyPlotRuntimeWarning)
return coords.get(tname) | def function[get_variable_by_axis, parameter[self, var, axis, coords]]:
constant[Return the coordinate matching the specified axis
This method uses to ``'axis'`` attribute in coordinates to return the
corresponding coordinate of the given variable
Possible types
--------------
var: xarray.Variable
The variable to get the dimension for
axis: {'x', 'y', 'z', 't'}
The axis string that identifies the dimension
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The coordinate for `var` that matches the given `axis` or None if
no coordinate with the right `axis` could be found.
Notes
-----
This is a rather low-level function that only interpretes the
CFConvention. It is used by the :meth:`get_x`,
:meth:`get_y`, :meth:`get_z` and :meth:`get_t` methods
Warning
-------
If None of the coordinates have an ``'axis'`` attribute, we use the
``'coordinate'`` attribute of `var` (if existent).
Since however the CF Conventions do not determine the order on how
the coordinates shall be saved, we try to use a pattern matching
for latitude (``'lat'``) and longitude (``lon'``). If this patterns
do not match, we interpret the coordinates such that x: -1, y: -2,
z: -3. This is all not very safe for awkward dimension names,
but works for most cases. If you want to be a hundred percent sure,
use the :attr:`x`, :attr:`y`, :attr:`z` and :attr:`t` attribute.
See Also
--------
get_x, get_y, get_z, get_t]
variable[axis] assign[=] call[name[axis].lower, parameter[]]
if compare[name[axis] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[constant[xyzt]]]] begin[:]
<ast.Raise object at 0x7da18bcc82e0>
variable[coords] assign[=] <ast.BoolOp object at 0x7da18bcc8670>
variable[coord_names] assign[=] call[call[name[var].attrs.get, parameter[constant[coordinates], call[name[var].encoding.get, parameter[constant[coordinates], constant[]]]]].split, parameter[]]
if <ast.UnaryOp object at 0x7da18bccb9d0> begin[:]
return[None]
variable[ret] assign[=] list[[]]
for taget[name[coord]] in starred[call[name[map], parameter[<ast.Lambda object at 0x7da18bcc8250>, call[name[filter], parameter[<ast.Lambda object at 0x7da18bcc8910>, call[name[chain], parameter[name[coord_names], name[var].dims]]]]]]] begin[:]
if <ast.BoolOp object at 0x7da18bcc9ff0> begin[:]
call[name[ret].append, parameter[name[coord]]]
if name[ret] begin[:]
return[<ast.IfExp object at 0x7da2054a5f00>]
variable[tnames] assign[=] call[name[self].t.intersection, parameter[name[coord_names]]]
if compare[name[axis] equal[==] constant[x]] begin[:]
for taget[name[cname]] in starred[call[name[filter], parameter[<ast.Lambda object at 0x7da2054a44c0>, name[coord_names]]]] begin[:]
return[call[name[coords]][name[cname]]]
return[call[name[coords].get, parameter[call[name[coord_names]][<ast.UnaryOp object at 0x7da18bcc91b0>]]]] | keyword[def] identifier[get_variable_by_axis] ( identifier[self] , identifier[var] , identifier[axis] , identifier[coords] = keyword[None] ):
literal[string]
identifier[axis] = identifier[axis] . identifier[lower] ()
keyword[if] identifier[axis] keyword[not] keyword[in] identifier[list] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[axis] ))
identifier[coords] = identifier[coords] keyword[or] identifier[self] . identifier[ds] . identifier[coords]
identifier[coord_names] = identifier[var] . identifier[attrs] . identifier[get] ( literal[string] , identifier[var] . identifier[encoding] . identifier[get] (
literal[string] , literal[string] )). identifier[split] ()
keyword[if] keyword[not] identifier[coord_names] :
keyword[return]
identifier[ret] =[]
keyword[for] identifier[coord] keyword[in] identifier[map] ( keyword[lambda] identifier[dim] : identifier[coords] [ identifier[dim] ], identifier[filter] (
keyword[lambda] identifier[dim] : identifier[dim] keyword[in] identifier[coords] , identifier[chain] (
identifier[coord_names] , identifier[var] . identifier[dims] ))):
keyword[if] ( identifier[coord] . identifier[name] keyword[not] keyword[in] ( identifier[c] . identifier[name] keyword[for] identifier[c] keyword[in] identifier[ret] ) keyword[and]
( identifier[coord] . identifier[attrs] . identifier[get] ( literal[string] , literal[string] ). identifier[lower] ()== identifier[axis] keyword[or]
identifier[coord] . identifier[name] keyword[in] identifier[getattr] ( identifier[self] , identifier[axis] ))):
identifier[ret] . identifier[append] ( identifier[coord] )
keyword[if] identifier[ret] :
keyword[return] keyword[None] keyword[if] identifier[len] ( identifier[ret] )> literal[int] keyword[else] identifier[ret] [ literal[int] ]
identifier[tnames] = identifier[self] . identifier[t] . identifier[intersection] ( identifier[coord_names] )
keyword[if] identifier[axis] == literal[string] :
keyword[for] identifier[cname] keyword[in] identifier[filter] ( keyword[lambda] identifier[cname] : identifier[re] . identifier[search] ( literal[string] , identifier[cname] ),
identifier[coord_names] ):
keyword[return] identifier[coords] [ identifier[cname] ]
keyword[return] identifier[coords] . identifier[get] ( identifier[coord_names] [- literal[int] ])
keyword[elif] identifier[axis] == literal[string] keyword[and] identifier[len] ( identifier[coord_names] )>= literal[int] :
keyword[for] identifier[cname] keyword[in] identifier[filter] ( keyword[lambda] identifier[cname] : identifier[re] . identifier[search] ( literal[string] , identifier[cname] ),
identifier[coord_names] ):
keyword[return] identifier[coords] [ identifier[cname] ]
keyword[return] identifier[coords] . identifier[get] ( identifier[coord_names] [- literal[int] ])
keyword[elif] ( identifier[axis] == literal[string] keyword[and] identifier[len] ( identifier[coord_names] )>= literal[int] keyword[and]
identifier[coord_names] [- literal[int] ] keyword[not] keyword[in] identifier[tnames] ):
keyword[return] identifier[coords] . identifier[get] ( identifier[coord_names] [- literal[int] ])
keyword[elif] identifier[axis] == literal[string] keyword[and] identifier[tnames] :
identifier[tname] = identifier[next] ( identifier[iter] ( identifier[tnames] ))
keyword[if] identifier[len] ( identifier[tnames] )> literal[int] :
identifier[warn] ( literal[string]
literal[string] %( literal[string] . identifier[join] ( identifier[tnames] ), identifier[tname] ),
identifier[PsyPlotRuntimeWarning] )
keyword[return] identifier[coords] . identifier[get] ( identifier[tname] ) | def get_variable_by_axis(self, var, axis, coords=None):
"""Return the coordinate matching the specified axis
This method uses to ``'axis'`` attribute in coordinates to return the
corresponding coordinate of the given variable
Possible types
--------------
var: xarray.Variable
The variable to get the dimension for
axis: {'x', 'y', 'z', 't'}
The axis string that identifies the dimension
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The coordinate for `var` that matches the given `axis` or None if
no coordinate with the right `axis` could be found.
Notes
-----
This is a rather low-level function that only interpretes the
CFConvention. It is used by the :meth:`get_x`,
:meth:`get_y`, :meth:`get_z` and :meth:`get_t` methods
Warning
-------
If None of the coordinates have an ``'axis'`` attribute, we use the
``'coordinate'`` attribute of `var` (if existent).
Since however the CF Conventions do not determine the order on how
the coordinates shall be saved, we try to use a pattern matching
for latitude (``'lat'``) and longitude (``lon'``). If this patterns
do not match, we interpret the coordinates such that x: -1, y: -2,
z: -3. This is all not very safe for awkward dimension names,
but works for most cases. If you want to be a hundred percent sure,
use the :attr:`x`, :attr:`y`, :attr:`z` and :attr:`t` attribute.
See Also
--------
get_x, get_y, get_z, get_t"""
axis = axis.lower()
if axis not in list('xyzt'):
raise ValueError('Axis must be one of X, Y, Z, T, not {0}'.format(axis)) # depends on [control=['if'], data=['axis']]
# we first check for the dimensions and then for the coordinates
# attribute
coords = coords or self.ds.coords
coord_names = var.attrs.get('coordinates', var.encoding.get('coordinates', '')).split()
if not coord_names:
return # depends on [control=['if'], data=[]]
ret = []
for coord in map(lambda dim: coords[dim], filter(lambda dim: dim in coords, chain(coord_names, var.dims))):
# check for the axis attribute or whether the coordinate is in the
# list of possible coordinate names
if coord.name not in (c.name for c in ret) and (coord.attrs.get('axis', '').lower() == axis or coord.name in getattr(self, axis)):
ret.append(coord) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['coord']]
if ret:
return None if len(ret) > 1 else ret[0] # depends on [control=['if'], data=[]]
# If the coordinates attribute is specified but the coordinate
# variables themselves have no 'axis' attribute, we interpret the
# coordinates such that x: -1, y: -2, z: -3
# Since however the CF Conventions do not determine the order on how
# the coordinates shall be saved, we try to use a pattern matching
# for latitude and longitude. This is not very nice, hence it is
# better to specify the :attr:`x` and :attr:`y` attribute
tnames = self.t.intersection(coord_names)
if axis == 'x':
for cname in filter(lambda cname: re.search('lon', cname), coord_names):
return coords[cname] # depends on [control=['for'], data=['cname']]
return coords.get(coord_names[-1]) # depends on [control=['if'], data=[]]
elif axis == 'y' and len(coord_names) >= 2:
for cname in filter(lambda cname: re.search('lat', cname), coord_names):
return coords[cname] # depends on [control=['for'], data=['cname']]
return coords.get(coord_names[-2]) # depends on [control=['if'], data=[]]
elif axis == 'z' and len(coord_names) >= 3 and (coord_names[-3] not in tnames):
return coords.get(coord_names[-3]) # depends on [control=['if'], data=[]]
elif axis == 't' and tnames:
tname = next(iter(tnames))
if len(tnames) > 1:
warn('Found multiple matches for time coordinate in the coordinates: %s. I use %s' % (', '.join(tnames), tname), PsyPlotRuntimeWarning) # depends on [control=['if'], data=[]]
return coords.get(tname) # depends on [control=['if'], data=[]] |
def assign(self, role):
'''Assign :class:`Role` ``role`` to this :class:`Subject`. If this
:class:`Subject` is the :attr:`Role.owner`, this method does nothing.'''
if role.owner_id != self.id:
return self.roles.add(role) | def function[assign, parameter[self, role]]:
constant[Assign :class:`Role` ``role`` to this :class:`Subject`. If this
:class:`Subject` is the :attr:`Role.owner`, this method does nothing.]
if compare[name[role].owner_id not_equal[!=] name[self].id] begin[:]
return[call[name[self].roles.add, parameter[name[role]]]] | keyword[def] identifier[assign] ( identifier[self] , identifier[role] ):
literal[string]
keyword[if] identifier[role] . identifier[owner_id] != identifier[self] . identifier[id] :
keyword[return] identifier[self] . identifier[roles] . identifier[add] ( identifier[role] ) | def assign(self, role):
"""Assign :class:`Role` ``role`` to this :class:`Subject`. If this
:class:`Subject` is the :attr:`Role.owner`, this method does nothing."""
if role.owner_id != self.id:
return self.roles.add(role) # depends on [control=['if'], data=[]] |
def lchown(path, user, group=None, pgroup=None):
'''
Chown a file, pass the file the desired user and group without following any
symlinks.
Under Windows, the group parameter will be ignored.
This is because while files in Windows do have a 'primary group'
property, this is rarely used. It generally has no bearing on
permissions unless intentionally configured and is most commonly used to
provide Unix compatibility (e.g. Services For Unix, NFS services).
If you do want to change the 'primary group' property and understand the
implications, pass the Windows only parameter, pgroup, instead.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
.. code-block:: bash
salt '*' file.lchown c:\\temp\\test.txt myusername
salt '*' file.lchown c:\\temp\\test.txt myusername pgroup=Administrators
salt '*' file.lchown c:\\temp\\test.txt myusername "pgroup='None'"
'''
if group:
func_name = '{0}.lchown'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The group parameter has no effect when using %s on '
'Windows systems; see function docs for details.',
func_name)
log.debug('win_file.py %s Ignoring the group parameter for %s',
func_name, path)
group = None
return chown(path, user, group, pgroup, follow_symlinks=False) | def function[lchown, parameter[path, user, group, pgroup]]:
constant[
Chown a file, pass the file the desired user and group without following any
symlinks.
Under Windows, the group parameter will be ignored.
This is because while files in Windows do have a 'primary group'
property, this is rarely used. It generally has no bearing on
permissions unless intentionally configured and is most commonly used to
provide Unix compatibility (e.g. Services For Unix, NFS services).
If you do want to change the 'primary group' property and understand the
implications, pass the Windows only parameter, pgroup, instead.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
.. code-block:: bash
salt '*' file.lchown c:\temp\test.txt myusername
salt '*' file.lchown c:\temp\test.txt myusername pgroup=Administrators
salt '*' file.lchown c:\temp\test.txt myusername "pgroup='None'"
]
if name[group] begin[:]
variable[func_name] assign[=] call[constant[{0}.lchown].format, parameter[name[__virtualname__]]]
if compare[call[name[__opts__].get, parameter[constant[fun], constant[]]] equal[==] name[func_name]] begin[:]
call[name[log].info, parameter[constant[The group parameter has no effect when using %s on Windows systems; see function docs for details.], name[func_name]]]
call[name[log].debug, parameter[constant[win_file.py %s Ignoring the group parameter for %s], name[func_name], name[path]]]
variable[group] assign[=] constant[None]
return[call[name[chown], parameter[name[path], name[user], name[group], name[pgroup]]]] | keyword[def] identifier[lchown] ( identifier[path] , identifier[user] , identifier[group] = keyword[None] , identifier[pgroup] = keyword[None] ):
literal[string]
keyword[if] identifier[group] :
identifier[func_name] = literal[string] . identifier[format] ( identifier[__virtualname__] )
keyword[if] identifier[__opts__] . identifier[get] ( literal[string] , literal[string] )== identifier[func_name] :
identifier[log] . identifier[info] ( literal[string]
literal[string] ,
identifier[func_name] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[func_name] , identifier[path] )
identifier[group] = keyword[None]
keyword[return] identifier[chown] ( identifier[path] , identifier[user] , identifier[group] , identifier[pgroup] , identifier[follow_symlinks] = keyword[False] ) | def lchown(path, user, group=None, pgroup=None):
"""
Chown a file, pass the file the desired user and group without following any
symlinks.
Under Windows, the group parameter will be ignored.
This is because while files in Windows do have a 'primary group'
property, this is rarely used. It generally has no bearing on
permissions unless intentionally configured and is most commonly used to
provide Unix compatibility (e.g. Services For Unix, NFS services).
If you do want to change the 'primary group' property and understand the
implications, pass the Windows only parameter, pgroup, instead.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
.. code-block:: bash
salt '*' file.lchown c:\\temp\\test.txt myusername
salt '*' file.lchown c:\\temp\\test.txt myusername pgroup=Administrators
salt '*' file.lchown c:\\temp\\test.txt myusername "pgroup='None'"
"""
if group:
func_name = '{0}.lchown'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The group parameter has no effect when using %s on Windows systems; see function docs for details.', func_name) # depends on [control=['if'], data=['func_name']]
log.debug('win_file.py %s Ignoring the group parameter for %s', func_name, path)
group = None # depends on [control=['if'], data=[]]
return chown(path, user, group, pgroup, follow_symlinks=False) |
def ensure_app_cache_dir(appname, *args):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_cache_dir('utool')
>>> assert exists(dpath)
"""
import utool as ut
dpath = get_app_cache_dir(appname, *args)
ut.ensuredir(dpath)
return dpath | def function[ensure_app_cache_dir, parameter[appname]]:
constant[
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_cache_dir('utool')
>>> assert exists(dpath)
]
import module[utool] as alias[ut]
variable[dpath] assign[=] call[name[get_app_cache_dir], parameter[name[appname], <ast.Starred object at 0x7da1b24bf130>]]
call[name[ut].ensuredir, parameter[name[dpath]]]
return[name[dpath]] | keyword[def] identifier[ensure_app_cache_dir] ( identifier[appname] ,* identifier[args] ):
literal[string]
keyword[import] identifier[utool] keyword[as] identifier[ut]
identifier[dpath] = identifier[get_app_cache_dir] ( identifier[appname] ,* identifier[args] )
identifier[ut] . identifier[ensuredir] ( identifier[dpath] )
keyword[return] identifier[dpath] | def ensure_app_cache_dir(appname, *args):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_cache_dir('utool')
>>> assert exists(dpath)
"""
import utool as ut
dpath = get_app_cache_dir(appname, *args)
ut.ensuredir(dpath)
return dpath |
def generate_gaussian_profile(seeing_fwhm):
"""Generate a normalized Gaussian profile from its FWHM"""
FWHM_G = 2 * math.sqrt(2 * math.log(2))
sigma = seeing_fwhm / FWHM_G
amplitude = 1.0 / (2 * math.pi * sigma * sigma)
seeing_model = Gaussian2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
x_stddev=sigma,
y_stddev=sigma)
return seeing_model | def function[generate_gaussian_profile, parameter[seeing_fwhm]]:
constant[Generate a normalized Gaussian profile from its FWHM]
variable[FWHM_G] assign[=] binary_operation[constant[2] * call[name[math].sqrt, parameter[binary_operation[constant[2] * call[name[math].log, parameter[constant[2]]]]]]]
variable[sigma] assign[=] binary_operation[name[seeing_fwhm] / name[FWHM_G]]
variable[amplitude] assign[=] binary_operation[constant[1.0] / binary_operation[binary_operation[binary_operation[constant[2] * name[math].pi] * name[sigma]] * name[sigma]]]
variable[seeing_model] assign[=] call[name[Gaussian2D], parameter[]]
return[name[seeing_model]] | keyword[def] identifier[generate_gaussian_profile] ( identifier[seeing_fwhm] ):
literal[string]
identifier[FWHM_G] = literal[int] * identifier[math] . identifier[sqrt] ( literal[int] * identifier[math] . identifier[log] ( literal[int] ))
identifier[sigma] = identifier[seeing_fwhm] / identifier[FWHM_G]
identifier[amplitude] = literal[int] /( literal[int] * identifier[math] . identifier[pi] * identifier[sigma] * identifier[sigma] )
identifier[seeing_model] = identifier[Gaussian2D] ( identifier[amplitude] = identifier[amplitude] ,
identifier[x_mean] = literal[int] ,
identifier[y_mean] = literal[int] ,
identifier[x_stddev] = identifier[sigma] ,
identifier[y_stddev] = identifier[sigma] )
keyword[return] identifier[seeing_model] | def generate_gaussian_profile(seeing_fwhm):
"""Generate a normalized Gaussian profile from its FWHM"""
FWHM_G = 2 * math.sqrt(2 * math.log(2))
sigma = seeing_fwhm / FWHM_G
amplitude = 1.0 / (2 * math.pi * sigma * sigma)
seeing_model = Gaussian2D(amplitude=amplitude, x_mean=0.0, y_mean=0.0, x_stddev=sigma, y_stddev=sigma)
return seeing_model |
def rebuild(self, image, wait=True):
"""
Rebuild this droplet with given image id
Parameters
----------
image: int or str
int for image id and str for image slug
wait: bool, default True
Whether to block until the pending action is completed
"""
return self._action('rebuild', image=image, wait=wait) | def function[rebuild, parameter[self, image, wait]]:
constant[
Rebuild this droplet with given image id
Parameters
----------
image: int or str
int for image id and str for image slug
wait: bool, default True
Whether to block until the pending action is completed
]
return[call[name[self]._action, parameter[constant[rebuild]]]] | keyword[def] identifier[rebuild] ( identifier[self] , identifier[image] , identifier[wait] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[_action] ( literal[string] , identifier[image] = identifier[image] , identifier[wait] = identifier[wait] ) | def rebuild(self, image, wait=True):
"""
Rebuild this droplet with given image id
Parameters
----------
image: int or str
int for image id and str for image slug
wait: bool, default True
Whether to block until the pending action is completed
"""
return self._action('rebuild', image=image, wait=wait) |
def error_received(self, exception):
"""Datagram error callback."""
if exception is None:
pass
else:
import pprint
pprint.pprint(exception)
_LOGGER.error('Error received: %s', exception) | def function[error_received, parameter[self, exception]]:
constant[Datagram error callback.]
if compare[name[exception] is constant[None]] begin[:]
pass | keyword[def] identifier[error_received] ( identifier[self] , identifier[exception] ):
literal[string]
keyword[if] identifier[exception] keyword[is] keyword[None] :
keyword[pass]
keyword[else] :
keyword[import] identifier[pprint]
identifier[pprint] . identifier[pprint] ( identifier[exception] )
identifier[_LOGGER] . identifier[error] ( literal[string] , identifier[exception] ) | def error_received(self, exception):
"""Datagram error callback."""
if exception is None:
pass # depends on [control=['if'], data=[]]
else:
import pprint
pprint.pprint(exception)
_LOGGER.error('Error received: %s', exception) |
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in self.read_headers(sock_file).items():
environ[str('HTTP_'+k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ | def function[build_environ, parameter[self, sock_file, conn]]:
constant[ Build the execution environment. ]
variable[request] assign[=] call[name[self].read_request_line, parameter[name[sock_file]]]
variable[environ] assign[=] call[name[self].base_environ.copy, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1aff00d00>, <ast.Name object at 0x7da1aff008b0>]]] in starred[call[call[name[self].read_headers, parameter[name[sock_file]]].items, parameter[]]] begin[:]
call[name[environ]][call[name[str], parameter[binary_operation[constant[HTTP_] + name[k]]]]] assign[=] name[v]
call[name[environ]][constant[REQUEST_METHOD]] assign[=] call[name[request]][constant[method]]
call[name[environ]][constant[PATH_INFO]] assign[=] call[name[request]][constant[path]]
call[name[environ]][constant[SERVER_PROTOCOL]] assign[=] call[name[request]][constant[protocol]]
call[name[environ]][constant[SERVER_PORT]] assign[=] call[name[str], parameter[name[conn].server_port]]
call[name[environ]][constant[REMOTE_PORT]] assign[=] call[name[str], parameter[name[conn].client_port]]
call[name[environ]][constant[REMOTE_ADDR]] assign[=] call[name[str], parameter[name[conn].client_addr]]
call[name[environ]][constant[QUERY_STRING]] assign[=] call[name[request]][constant[query_string]]
if compare[constant[HTTP_CONTENT_LENGTH] in name[environ]] begin[:]
call[name[environ]][constant[CONTENT_LENGTH]] assign[=] call[name[environ]][constant[HTTP_CONTENT_LENGTH]]
if compare[constant[HTTP_CONTENT_TYPE] in name[environ]] begin[:]
call[name[environ]][constant[CONTENT_TYPE]] assign[=] call[name[environ]][constant[HTTP_CONTENT_TYPE]]
name[self].request_method assign[=] call[name[environ]][constant[REQUEST_METHOD]]
if name[conn].ssl begin[:]
call[name[environ]][constant[wsgi.url_scheme]] assign[=] constant[https]
call[name[environ]][constant[HTTPS]] assign[=] constant[on]
if compare[call[name[environ].get, parameter[constant[HTTP_TRANSFER_ENCODING], constant[]]] equal[==] constant[chunked]] begin[:]
call[name[environ]][constant[wsgi.input]] assign[=] call[name[ChunkedReader], parameter[name[sock_file]]]
return[name[environ]] | keyword[def] identifier[build_environ] ( identifier[self] , identifier[sock_file] , identifier[conn] ):
literal[string]
identifier[request] = identifier[self] . identifier[read_request_line] ( identifier[sock_file] )
identifier[environ] = identifier[self] . identifier[base_environ] . identifier[copy] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[read_headers] ( identifier[sock_file] ). identifier[items] ():
identifier[environ] [ identifier[str] ( literal[string] + identifier[k] )]= identifier[v]
identifier[environ] [ literal[string] ]= identifier[request] [ literal[string] ]
identifier[environ] [ literal[string] ]= identifier[request] [ literal[string] ]
identifier[environ] [ literal[string] ]= identifier[request] [ literal[string] ]
identifier[environ] [ literal[string] ]= identifier[str] ( identifier[conn] . identifier[server_port] )
identifier[environ] [ literal[string] ]= identifier[str] ( identifier[conn] . identifier[client_port] )
identifier[environ] [ literal[string] ]= identifier[str] ( identifier[conn] . identifier[client_addr] )
identifier[environ] [ literal[string] ]= identifier[request] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[environ] :
identifier[environ] [ literal[string] ]= identifier[environ] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[environ] :
identifier[environ] [ literal[string] ]= identifier[environ] [ literal[string] ]
identifier[self] . identifier[request_method] = identifier[environ] [ literal[string] ]
keyword[if] identifier[conn] . identifier[ssl] :
identifier[environ] [ literal[string] ]= literal[string]
identifier[environ] [ literal[string] ]= literal[string]
keyword[else] :
identifier[environ] [ literal[string] ]= literal[string]
keyword[if] identifier[environ] . identifier[get] ( literal[string] , literal[string] )== literal[string] :
identifier[environ] [ literal[string] ]= identifier[ChunkedReader] ( identifier[sock_file] )
keyword[else] :
identifier[environ] [ literal[string] ]= identifier[sock_file]
keyword[return] identifier[environ] | def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for (k, v) in self.read_headers(sock_file).items():
environ[str('HTTP_' + k)] = v # depends on [control=['for'], data=[]]
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH'] # depends on [control=['if'], data=['environ']]
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE'] # depends on [control=['if'], data=['environ']]
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on' # depends on [control=['if'], data=[]]
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file) # depends on [control=['if'], data=[]]
else:
environ['wsgi.input'] = sock_file
return environ |
def handle(self, line_info):
# print "normal: ", line_info
"""Handle normal input lines. Use as a template for handlers."""
# With autoindent on, we need some way to exit the input loop, and I
# don't want to force the user to have to backspace all the way to
# clear the line. The rule will be in this case, that either two
# lines of pure whitespace in a row, or a line of pure whitespace but
# of a size different to the indent level, will exit the input loop.
line = line_info.line
continue_prompt = line_info.continue_prompt
if (continue_prompt and
self.shell.autoindent and
line.isspace() and
0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
line = ''
return line | def function[handle, parameter[self, line_info]]:
constant[Handle normal input lines. Use as a template for handlers.]
variable[line] assign[=] name[line_info].line
variable[continue_prompt] assign[=] name[line_info].continue_prompt
if <ast.BoolOp object at 0x7da1b26acbe0> begin[:]
variable[line] assign[=] constant[]
return[name[line]] | keyword[def] identifier[handle] ( identifier[self] , identifier[line_info] ):
literal[string]
identifier[line] = identifier[line_info] . identifier[line]
identifier[continue_prompt] = identifier[line_info] . identifier[continue_prompt]
keyword[if] ( identifier[continue_prompt] keyword[and]
identifier[self] . identifier[shell] . identifier[autoindent] keyword[and]
identifier[line] . identifier[isspace] () keyword[and]
literal[int] < identifier[abs] ( identifier[len] ( identifier[line] )- identifier[self] . identifier[shell] . identifier[indent_current_nsp] )<= literal[int] ):
identifier[line] = literal[string]
keyword[return] identifier[line] | def handle(self, line_info):
# print "normal: ", line_info
'Handle normal input lines. Use as a template for handlers.'
# With autoindent on, we need some way to exit the input loop, and I
# don't want to force the user to have to backspace all the way to
# clear the line. The rule will be in this case, that either two
# lines of pure whitespace in a row, or a line of pure whitespace but
# of a size different to the indent level, will exit the input loop.
line = line_info.line
continue_prompt = line_info.continue_prompt
if continue_prompt and self.shell.autoindent and line.isspace() and (0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
line = '' # depends on [control=['if'], data=[]]
return line |
def gaussian(self, x, p):
"""Gaussian fitting function in 1D. Makes a sine function with
amplitude determined by maxv. See calc_fwhm().
p[0]==mean, p[1]==sdev, p[2]=maxv
"""
y = (1.0 / (p[1] * np.sqrt(2 * np.pi)) *
np.exp(-(x - p[0]) ** 2 / (2 * p[1] ** 2))) * p[2]
return y | def function[gaussian, parameter[self, x, p]]:
constant[Gaussian fitting function in 1D. Makes a sine function with
amplitude determined by maxv. See calc_fwhm().
p[0]==mean, p[1]==sdev, p[2]=maxv
]
variable[y] assign[=] binary_operation[binary_operation[binary_operation[constant[1.0] / binary_operation[call[name[p]][constant[1]] * call[name[np].sqrt, parameter[binary_operation[constant[2] * name[np].pi]]]]] * call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da204960850> / binary_operation[constant[2] * binary_operation[call[name[p]][constant[1]] ** constant[2]]]]]]] * call[name[p]][constant[2]]]
return[name[y]] | keyword[def] identifier[gaussian] ( identifier[self] , identifier[x] , identifier[p] ):
literal[string]
identifier[y] =( literal[int] /( identifier[p] [ literal[int] ]* identifier[np] . identifier[sqrt] ( literal[int] * identifier[np] . identifier[pi] ))*
identifier[np] . identifier[exp] (-( identifier[x] - identifier[p] [ literal[int] ])** literal[int] /( literal[int] * identifier[p] [ literal[int] ]** literal[int] )))* identifier[p] [ literal[int] ]
keyword[return] identifier[y] | def gaussian(self, x, p):
"""Gaussian fitting function in 1D. Makes a sine function with
amplitude determined by maxv. See calc_fwhm().
p[0]==mean, p[1]==sdev, p[2]=maxv
"""
y = 1.0 / (p[1] * np.sqrt(2 * np.pi)) * np.exp(-(x - p[0]) ** 2 / (2 * p[1] ** 2)) * p[2]
return y |
def _get_containers(self):
"""Return available containers."""
buckets = self.native_conn.get_all_buckets()
return [self.cont_cls.from_bucket(self, b) for b in buckets] | def function[_get_containers, parameter[self]]:
constant[Return available containers.]
variable[buckets] assign[=] call[name[self].native_conn.get_all_buckets, parameter[]]
return[<ast.ListComp object at 0x7da18f00d7e0>] | keyword[def] identifier[_get_containers] ( identifier[self] ):
literal[string]
identifier[buckets] = identifier[self] . identifier[native_conn] . identifier[get_all_buckets] ()
keyword[return] [ identifier[self] . identifier[cont_cls] . identifier[from_bucket] ( identifier[self] , identifier[b] ) keyword[for] identifier[b] keyword[in] identifier[buckets] ] | def _get_containers(self):
"""Return available containers."""
buckets = self.native_conn.get_all_buckets()
return [self.cont_cls.from_bucket(self, b) for b in buckets] |
def semanticSimilarity(self, text1, text2, distanceMeasure = "cosine"):
"""
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
"""
return self._er.jsonRequestAnalytics("/api/v1/semanticSimilarity", { "text1": text1, "text2": text2, "distanceMeasure": distanceMeasure }) | def function[semanticSimilarity, parameter[self, text1, text2, distanceMeasure]]:
constant[
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
]
return[call[name[self]._er.jsonRequestAnalytics, parameter[constant[/api/v1/semanticSimilarity], dictionary[[<ast.Constant object at 0x7da2047ea530>, <ast.Constant object at 0x7da2047e9600>, <ast.Constant object at 0x7da2047eb010>], [<ast.Name object at 0x7da2047e8550>, <ast.Name object at 0x7da2047e8280>, <ast.Name object at 0x7da2047e8eb0>]]]]] | keyword[def] identifier[semanticSimilarity] ( identifier[self] , identifier[text1] , identifier[text2] , identifier[distanceMeasure] = literal[string] ):
literal[string]
keyword[return] identifier[self] . identifier[_er] . identifier[jsonRequestAnalytics] ( literal[string] ,{ literal[string] : identifier[text1] , literal[string] : identifier[text2] , literal[string] : identifier[distanceMeasure] }) | def semanticSimilarity(self, text1, text2, distanceMeasure='cosine'):
"""
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
"""
return self._er.jsonRequestAnalytics('/api/v1/semanticSimilarity', {'text1': text1, 'text2': text2, 'distanceMeasure': distanceMeasure}) |
def targets(tgt, tgt_type='range', **kwargs):
'''
Return the targets from a range query
'''
r = seco.range.Range(__opts__['range_server'])
log.debug('Range connection to \'%s\' established', __opts__['range_server'])
hosts = []
try:
log.debug('Querying range for \'%s\'', tgt)
hosts = r.expand(tgt)
except seco.range.RangeException as err:
log.error('Range server exception: %s', err)
return {}
log.debug('Range responded with: \'%s\'', hosts)
# Currently we only support giving a raw range entry, no target filtering supported other than what range returns :S
tgt_func = {
'range': target_range,
'glob': target_range,
# 'glob': target_glob,
}
log.debug('Filtering using tgt_type: \'%s\'', tgt_type)
try:
targeted_hosts = tgt_func[tgt_type](tgt, hosts)
except KeyError:
raise NotImplementedError
log.debug('Targeting data for salt-ssh: \'%s\'', targeted_hosts)
return targeted_hosts | def function[targets, parameter[tgt, tgt_type]]:
constant[
Return the targets from a range query
]
variable[r] assign[=] call[name[seco].range.Range, parameter[call[name[__opts__]][constant[range_server]]]]
call[name[log].debug, parameter[constant[Range connection to '%s' established], call[name[__opts__]][constant[range_server]]]]
variable[hosts] assign[=] list[[]]
<ast.Try object at 0x7da18f00ccd0>
call[name[log].debug, parameter[constant[Range responded with: '%s'], name[hosts]]]
variable[tgt_func] assign[=] dictionary[[<ast.Constant object at 0x7da18f00e9b0>, <ast.Constant object at 0x7da18f00f700>], [<ast.Name object at 0x7da18f00f190>, <ast.Name object at 0x7da18f00f9d0>]]
call[name[log].debug, parameter[constant[Filtering using tgt_type: '%s'], name[tgt_type]]]
<ast.Try object at 0x7da18f00f0d0>
call[name[log].debug, parameter[constant[Targeting data for salt-ssh: '%s'], name[targeted_hosts]]]
return[name[targeted_hosts]] | keyword[def] identifier[targets] ( identifier[tgt] , identifier[tgt_type] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[r] = identifier[seco] . identifier[range] . identifier[Range] ( identifier[__opts__] [ literal[string] ])
identifier[log] . identifier[debug] ( literal[string] , identifier[__opts__] [ literal[string] ])
identifier[hosts] =[]
keyword[try] :
identifier[log] . identifier[debug] ( literal[string] , identifier[tgt] )
identifier[hosts] = identifier[r] . identifier[expand] ( identifier[tgt] )
keyword[except] identifier[seco] . identifier[range] . identifier[RangeException] keyword[as] identifier[err] :
identifier[log] . identifier[error] ( literal[string] , identifier[err] )
keyword[return] {}
identifier[log] . identifier[debug] ( literal[string] , identifier[hosts] )
identifier[tgt_func] ={
literal[string] : identifier[target_range] ,
literal[string] : identifier[target_range] ,
}
identifier[log] . identifier[debug] ( literal[string] , identifier[tgt_type] )
keyword[try] :
identifier[targeted_hosts] = identifier[tgt_func] [ identifier[tgt_type] ]( identifier[tgt] , identifier[hosts] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[NotImplementedError]
identifier[log] . identifier[debug] ( literal[string] , identifier[targeted_hosts] )
keyword[return] identifier[targeted_hosts] | def targets(tgt, tgt_type='range', **kwargs):
"""
Return the targets from a range query
"""
r = seco.range.Range(__opts__['range_server'])
log.debug("Range connection to '%s' established", __opts__['range_server'])
hosts = []
try:
log.debug("Querying range for '%s'", tgt)
hosts = r.expand(tgt) # depends on [control=['try'], data=[]]
except seco.range.RangeException as err:
log.error('Range server exception: %s', err)
return {} # depends on [control=['except'], data=['err']]
log.debug("Range responded with: '%s'", hosts)
# Currently we only support giving a raw range entry, no target filtering supported other than what range returns :S
# 'glob': target_glob,
tgt_func = {'range': target_range, 'glob': target_range}
log.debug("Filtering using tgt_type: '%s'", tgt_type)
try:
targeted_hosts = tgt_func[tgt_type](tgt, hosts) # depends on [control=['try'], data=[]]
except KeyError:
raise NotImplementedError # depends on [control=['except'], data=[]]
log.debug("Targeting data for salt-ssh: '%s'", targeted_hosts)
return targeted_hosts |
def delete_multifactor(self, id, provider):
"""Delete a user's multifactor provider.
Args:
id (str): The user's id.
provider (str): The multifactor provider. Supported values 'duo'
or 'google-authenticator'
See: https://auth0.com/docs/api/management/v2#!/Users/delete_multifactor_by_provider
"""
url = self._url('{}/multifactor/{}'.format(id, provider))
return self.client.delete(url) | def function[delete_multifactor, parameter[self, id, provider]]:
constant[Delete a user's multifactor provider.
Args:
id (str): The user's id.
provider (str): The multifactor provider. Supported values 'duo'
or 'google-authenticator'
See: https://auth0.com/docs/api/management/v2#!/Users/delete_multifactor_by_provider
]
variable[url] assign[=] call[name[self]._url, parameter[call[constant[{}/multifactor/{}].format, parameter[name[id], name[provider]]]]]
return[call[name[self].client.delete, parameter[name[url]]]] | keyword[def] identifier[delete_multifactor] ( identifier[self] , identifier[id] , identifier[provider] ):
literal[string]
identifier[url] = identifier[self] . identifier[_url] ( literal[string] . identifier[format] ( identifier[id] , identifier[provider] ))
keyword[return] identifier[self] . identifier[client] . identifier[delete] ( identifier[url] ) | def delete_multifactor(self, id, provider):
"""Delete a user's multifactor provider.
Args:
id (str): The user's id.
provider (str): The multifactor provider. Supported values 'duo'
or 'google-authenticator'
See: https://auth0.com/docs/api/management/v2#!/Users/delete_multifactor_by_provider
"""
url = self._url('{}/multifactor/{}'.format(id, provider))
return self.client.delete(url) |
def cli(
ctx,
config_file=None,
requirements=None,
profile=None):
'''
An abstraction layer for data storage systems
DataFS is a package manager for data. It manages file versions,
dependencies, and metadata for individual use or large organizations.
For more information, see the docs at https://datafs.readthedocs.io
'''
ctx.obj = _DataFSInterface()
ctx.obj.config_file = config_file
ctx.obj.requirements = requirements
ctx.obj.profile = profile
def teardown():
if hasattr(ctx.obj, 'api'):
ctx.obj.api.close()
ctx.call_on_close(teardown) | def function[cli, parameter[ctx, config_file, requirements, profile]]:
constant[
An abstraction layer for data storage systems
DataFS is a package manager for data. It manages file versions,
dependencies, and metadata for individual use or large organizations.
For more information, see the docs at https://datafs.readthedocs.io
]
name[ctx].obj assign[=] call[name[_DataFSInterface], parameter[]]
name[ctx].obj.config_file assign[=] name[config_file]
name[ctx].obj.requirements assign[=] name[requirements]
name[ctx].obj.profile assign[=] name[profile]
def function[teardown, parameter[]]:
if call[name[hasattr], parameter[name[ctx].obj, constant[api]]] begin[:]
call[name[ctx].obj.api.close, parameter[]]
call[name[ctx].call_on_close, parameter[name[teardown]]] | keyword[def] identifier[cli] (
identifier[ctx] ,
identifier[config_file] = keyword[None] ,
identifier[requirements] = keyword[None] ,
identifier[profile] = keyword[None] ):
literal[string]
identifier[ctx] . identifier[obj] = identifier[_DataFSInterface] ()
identifier[ctx] . identifier[obj] . identifier[config_file] = identifier[config_file]
identifier[ctx] . identifier[obj] . identifier[requirements] = identifier[requirements]
identifier[ctx] . identifier[obj] . identifier[profile] = identifier[profile]
keyword[def] identifier[teardown] ():
keyword[if] identifier[hasattr] ( identifier[ctx] . identifier[obj] , literal[string] ):
identifier[ctx] . identifier[obj] . identifier[api] . identifier[close] ()
identifier[ctx] . identifier[call_on_close] ( identifier[teardown] ) | def cli(ctx, config_file=None, requirements=None, profile=None):
"""
An abstraction layer for data storage systems
DataFS is a package manager for data. It manages file versions,
dependencies, and metadata for individual use or large organizations.
For more information, see the docs at https://datafs.readthedocs.io
"""
ctx.obj = _DataFSInterface()
ctx.obj.config_file = config_file
ctx.obj.requirements = requirements
ctx.obj.profile = profile
def teardown():
if hasattr(ctx.obj, 'api'):
ctx.obj.api.close() # depends on [control=['if'], data=[]]
ctx.call_on_close(teardown) |
def push(self, line, frame, buffer_output=True):
"""Change built-in stdout and stderr methods by the
new custom StdMessage.
execute the InteractiveConsole.push.
Change the stdout and stderr back be the original built-ins
:param buffer_output: if False won't redirect the output.
Return boolean (True if more input is required else False),
output_messages and input_messages
"""
self.__buffer_output = buffer_output
more = False
if buffer_output:
original_stdout = sys.stdout
original_stderr = sys.stderr
try:
try:
self.frame = frame
if buffer_output:
out = sys.stdout = IOBuf()
err = sys.stderr = IOBuf()
more = self.add_exec(line)
except Exception:
exc = get_exception_traceback_str()
if buffer_output:
err.buflist.append("Internal Error: %s" % (exc,))
else:
sys.stderr.write("Internal Error: %s\n" % (exc,))
finally:
#Remove frame references.
self.frame = None
frame = None
if buffer_output:
sys.stdout = original_stdout
sys.stderr = original_stderr
if buffer_output:
return more, out.buflist, err.buflist
else:
return more, [], [] | def function[push, parameter[self, line, frame, buffer_output]]:
constant[Change built-in stdout and stderr methods by the
new custom StdMessage.
execute the InteractiveConsole.push.
Change the stdout and stderr back be the original built-ins
:param buffer_output: if False won't redirect the output.
Return boolean (True if more input is required else False),
output_messages and input_messages
]
name[self].__buffer_output assign[=] name[buffer_output]
variable[more] assign[=] constant[False]
if name[buffer_output] begin[:]
variable[original_stdout] assign[=] name[sys].stdout
variable[original_stderr] assign[=] name[sys].stderr
<ast.Try object at 0x7da1b0779570>
if name[buffer_output] begin[:]
return[tuple[[<ast.Name object at 0x7da1b0779540>, <ast.Attribute object at 0x7da1b0778c10>, <ast.Attribute object at 0x7da204620f10>]]] | keyword[def] identifier[push] ( identifier[self] , identifier[line] , identifier[frame] , identifier[buffer_output] = keyword[True] ):
literal[string]
identifier[self] . identifier[__buffer_output] = identifier[buffer_output]
identifier[more] = keyword[False]
keyword[if] identifier[buffer_output] :
identifier[original_stdout] = identifier[sys] . identifier[stdout]
identifier[original_stderr] = identifier[sys] . identifier[stderr]
keyword[try] :
keyword[try] :
identifier[self] . identifier[frame] = identifier[frame]
keyword[if] identifier[buffer_output] :
identifier[out] = identifier[sys] . identifier[stdout] = identifier[IOBuf] ()
identifier[err] = identifier[sys] . identifier[stderr] = identifier[IOBuf] ()
identifier[more] = identifier[self] . identifier[add_exec] ( identifier[line] )
keyword[except] identifier[Exception] :
identifier[exc] = identifier[get_exception_traceback_str] ()
keyword[if] identifier[buffer_output] :
identifier[err] . identifier[buflist] . identifier[append] ( literal[string] %( identifier[exc] ,))
keyword[else] :
identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] %( identifier[exc] ,))
keyword[finally] :
identifier[self] . identifier[frame] = keyword[None]
identifier[frame] = keyword[None]
keyword[if] identifier[buffer_output] :
identifier[sys] . identifier[stdout] = identifier[original_stdout]
identifier[sys] . identifier[stderr] = identifier[original_stderr]
keyword[if] identifier[buffer_output] :
keyword[return] identifier[more] , identifier[out] . identifier[buflist] , identifier[err] . identifier[buflist]
keyword[else] :
keyword[return] identifier[more] ,[],[] | def push(self, line, frame, buffer_output=True):
"""Change built-in stdout and stderr methods by the
new custom StdMessage.
execute the InteractiveConsole.push.
Change the stdout and stderr back be the original built-ins
:param buffer_output: if False won't redirect the output.
Return boolean (True if more input is required else False),
output_messages and input_messages
"""
self.__buffer_output = buffer_output
more = False
if buffer_output:
original_stdout = sys.stdout
original_stderr = sys.stderr # depends on [control=['if'], data=[]]
try:
try:
self.frame = frame
if buffer_output:
out = sys.stdout = IOBuf()
err = sys.stderr = IOBuf() # depends on [control=['if'], data=[]]
more = self.add_exec(line) # depends on [control=['try'], data=[]]
except Exception:
exc = get_exception_traceback_str()
if buffer_output:
err.buflist.append('Internal Error: %s' % (exc,)) # depends on [control=['if'], data=[]]
else:
sys.stderr.write('Internal Error: %s\n' % (exc,)) # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
finally:
#Remove frame references.
self.frame = None
frame = None
if buffer_output:
sys.stdout = original_stdout
sys.stderr = original_stderr # depends on [control=['if'], data=[]]
if buffer_output:
return (more, out.buflist, err.buflist) # depends on [control=['if'], data=[]]
else:
return (more, [], []) |
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_high_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 4)
self.set_attributes(priority, address, rtr)
self.relay_channels = self.byte_to_channels(data)
(self.delay_time,) = struct.unpack('>L', bytes([0]) + data[1:]) | def function[populate, parameter[self, priority, address, rtr, data]]:
constant[
:return: None
]
assert[call[name[isinstance], parameter[name[data], name[bytes]]]]
call[name[self].needs_high_priority, parameter[name[priority]]]
call[name[self].needs_no_rtr, parameter[name[rtr]]]
call[name[self].needs_data, parameter[name[data], constant[4]]]
call[name[self].set_attributes, parameter[name[priority], name[address], name[rtr]]]
name[self].relay_channels assign[=] call[name[self].byte_to_channels, parameter[name[data]]]
<ast.Tuple object at 0x7da1b2485000> assign[=] call[name[struct].unpack, parameter[constant[>L], binary_operation[call[name[bytes], parameter[list[[<ast.Constant object at 0x7da1b2486650>]]]] + call[name[data]][<ast.Slice object at 0x7da1b2485e10>]]]] | keyword[def] identifier[populate] ( identifier[self] , identifier[priority] , identifier[address] , identifier[rtr] , identifier[data] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[data] , identifier[bytes] )
identifier[self] . identifier[needs_high_priority] ( identifier[priority] )
identifier[self] . identifier[needs_no_rtr] ( identifier[rtr] )
identifier[self] . identifier[needs_data] ( identifier[data] , literal[int] )
identifier[self] . identifier[set_attributes] ( identifier[priority] , identifier[address] , identifier[rtr] )
identifier[self] . identifier[relay_channels] = identifier[self] . identifier[byte_to_channels] ( identifier[data] )
( identifier[self] . identifier[delay_time] ,)= identifier[struct] . identifier[unpack] ( literal[string] , identifier[bytes] ([ literal[int] ])+ identifier[data] [ literal[int] :]) | def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_high_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 4)
self.set_attributes(priority, address, rtr)
self.relay_channels = self.byte_to_channels(data)
(self.delay_time,) = struct.unpack('>L', bytes([0]) + data[1:]) |
def _list_to_complex_array(complex_list):
"""Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
np.ndarray: Complex numpy aray
Raises:
QiskitError: If inner most array of input nested list is not of length 2.
"""
arr = np.asarray(complex_list, dtype=np.complex_)
if not arr.shape[-1] == 2:
raise QiskitError('Inner most nested list is not of length 2.')
return arr[..., 0] + 1j*arr[..., 1] | def function[_list_to_complex_array, parameter[complex_list]]:
constant[Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
np.ndarray: Complex numpy aray
Raises:
QiskitError: If inner most array of input nested list is not of length 2.
]
variable[arr] assign[=] call[name[np].asarray, parameter[name[complex_list]]]
if <ast.UnaryOp object at 0x7da1b0531db0> begin[:]
<ast.Raise object at 0x7da1b05329b0>
return[binary_operation[call[name[arr]][tuple[[<ast.Constant object at 0x7da1b0533460>, <ast.Constant object at 0x7da1b0532e00>]]] + binary_operation[constant[1j] * call[name[arr]][tuple[[<ast.Constant object at 0x7da1b0532d40>, <ast.Constant object at 0x7da1b0532620>]]]]]] | keyword[def] identifier[_list_to_complex_array] ( identifier[complex_list] ):
literal[string]
identifier[arr] = identifier[np] . identifier[asarray] ( identifier[complex_list] , identifier[dtype] = identifier[np] . identifier[complex_] )
keyword[if] keyword[not] identifier[arr] . identifier[shape] [- literal[int] ]== literal[int] :
keyword[raise] identifier[QiskitError] ( literal[string] )
keyword[return] identifier[arr] [..., literal[int] ]+ literal[int] * identifier[arr] [..., literal[int] ] | def _list_to_complex_array(complex_list):
"""Convert nested list of shape (..., 2) to complex numpy array with shape (...)
Args:
complex_list (list): List to convert.
Returns:
np.ndarray: Complex numpy aray
Raises:
QiskitError: If inner most array of input nested list is not of length 2.
"""
arr = np.asarray(complex_list, dtype=np.complex_)
if not arr.shape[-1] == 2:
raise QiskitError('Inner most nested list is not of length 2.') # depends on [control=['if'], data=[]]
return arr[..., 0] + 1j * arr[..., 1] |
def get_plural_tag_index(number, locale):
"""Gets the plural tag index of a number on the plural rule of a locale::
>>> get_plural_tag_index(1, 'en_US')
0
>>> get_plural_tag_index(2, 'en_US')
1
>>> get_plural_tag_index(100, 'en_US')
1
"""
locale = Locale.parse(locale)
plural_rule = locale.plural_form
used_tags = plural_rule.tags | set([_fallback_tag])
tag, index = plural_rule(number), 0
for _tag in _plural_tags:
if _tag == tag:
return index
if _tag in used_tags:
index += 1 | def function[get_plural_tag_index, parameter[number, locale]]:
constant[Gets the plural tag index of a number on the plural rule of a locale::
>>> get_plural_tag_index(1, 'en_US')
0
>>> get_plural_tag_index(2, 'en_US')
1
>>> get_plural_tag_index(100, 'en_US')
1
]
variable[locale] assign[=] call[name[Locale].parse, parameter[name[locale]]]
variable[plural_rule] assign[=] name[locale].plural_form
variable[used_tags] assign[=] binary_operation[name[plural_rule].tags <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[list[[<ast.Name object at 0x7da20c7950f0>]]]]]
<ast.Tuple object at 0x7da20c7952a0> assign[=] tuple[[<ast.Call object at 0x7da20c794b80>, <ast.Constant object at 0x7da20c796920>]]
for taget[name[_tag]] in starred[name[_plural_tags]] begin[:]
if compare[name[_tag] equal[==] name[tag]] begin[:]
return[name[index]]
if compare[name[_tag] in name[used_tags]] begin[:]
<ast.AugAssign object at 0x7da20c7955a0> | keyword[def] identifier[get_plural_tag_index] ( identifier[number] , identifier[locale] ):
literal[string]
identifier[locale] = identifier[Locale] . identifier[parse] ( identifier[locale] )
identifier[plural_rule] = identifier[locale] . identifier[plural_form]
identifier[used_tags] = identifier[plural_rule] . identifier[tags] | identifier[set] ([ identifier[_fallback_tag] ])
identifier[tag] , identifier[index] = identifier[plural_rule] ( identifier[number] ), literal[int]
keyword[for] identifier[_tag] keyword[in] identifier[_plural_tags] :
keyword[if] identifier[_tag] == identifier[tag] :
keyword[return] identifier[index]
keyword[if] identifier[_tag] keyword[in] identifier[used_tags] :
identifier[index] += literal[int] | def get_plural_tag_index(number, locale):
"""Gets the plural tag index of a number on the plural rule of a locale::
>>> get_plural_tag_index(1, 'en_US')
0
>>> get_plural_tag_index(2, 'en_US')
1
>>> get_plural_tag_index(100, 'en_US')
1
"""
locale = Locale.parse(locale)
plural_rule = locale.plural_form
used_tags = plural_rule.tags | set([_fallback_tag])
(tag, index) = (plural_rule(number), 0)
for _tag in _plural_tags:
if _tag == tag:
return index # depends on [control=['if'], data=[]]
if _tag in used_tags:
index += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_tag']] |
def _download_item(item_id, path='.', item=None):
"""
Download the requested item to the specified path.
:param item_id: The id of the item to be downloaded
:type item_id: int | long
:param path: (optional) the location to download the item
:type path: string
:param item: The dict of item info
:type item: dict | None
"""
session.token = verify_credentials()
filename, content_iter = session.communicator.download_item(
item_id, session.token)
item_path = os.path.join(path, filename)
print('Creating file at {0}'.format(item_path))
out_file = open(item_path, 'wb')
for block in content_iter:
out_file.write(block)
out_file.close()
for callback in session.item_download_callbacks:
if not item:
item = session.communicator.item_get(session.token, item_id)
callback(session.communicator, session.token, item, item_path) | def function[_download_item, parameter[item_id, path, item]]:
constant[
Download the requested item to the specified path.
:param item_id: The id of the item to be downloaded
:type item_id: int | long
:param path: (optional) the location to download the item
:type path: string
:param item: The dict of item info
:type item: dict | None
]
name[session].token assign[=] call[name[verify_credentials], parameter[]]
<ast.Tuple object at 0x7da1b021d480> assign[=] call[name[session].communicator.download_item, parameter[name[item_id], name[session].token]]
variable[item_path] assign[=] call[name[os].path.join, parameter[name[path], name[filename]]]
call[name[print], parameter[call[constant[Creating file at {0}].format, parameter[name[item_path]]]]]
variable[out_file] assign[=] call[name[open], parameter[name[item_path], constant[wb]]]
for taget[name[block]] in starred[name[content_iter]] begin[:]
call[name[out_file].write, parameter[name[block]]]
call[name[out_file].close, parameter[]]
for taget[name[callback]] in starred[name[session].item_download_callbacks] begin[:]
if <ast.UnaryOp object at 0x7da20e957670> begin[:]
variable[item] assign[=] call[name[session].communicator.item_get, parameter[name[session].token, name[item_id]]]
call[name[callback], parameter[name[session].communicator, name[session].token, name[item], name[item_path]]] | keyword[def] identifier[_download_item] ( identifier[item_id] , identifier[path] = literal[string] , identifier[item] = keyword[None] ):
literal[string]
identifier[session] . identifier[token] = identifier[verify_credentials] ()
identifier[filename] , identifier[content_iter] = identifier[session] . identifier[communicator] . identifier[download_item] (
identifier[item_id] , identifier[session] . identifier[token] )
identifier[item_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[filename] )
identifier[print] ( literal[string] . identifier[format] ( identifier[item_path] ))
identifier[out_file] = identifier[open] ( identifier[item_path] , literal[string] )
keyword[for] identifier[block] keyword[in] identifier[content_iter] :
identifier[out_file] . identifier[write] ( identifier[block] )
identifier[out_file] . identifier[close] ()
keyword[for] identifier[callback] keyword[in] identifier[session] . identifier[item_download_callbacks] :
keyword[if] keyword[not] identifier[item] :
identifier[item] = identifier[session] . identifier[communicator] . identifier[item_get] ( identifier[session] . identifier[token] , identifier[item_id] )
identifier[callback] ( identifier[session] . identifier[communicator] , identifier[session] . identifier[token] , identifier[item] , identifier[item_path] ) | def _download_item(item_id, path='.', item=None):
"""
Download the requested item to the specified path.
:param item_id: The id of the item to be downloaded
:type item_id: int | long
:param path: (optional) the location to download the item
:type path: string
:param item: The dict of item info
:type item: dict | None
"""
session.token = verify_credentials()
(filename, content_iter) = session.communicator.download_item(item_id, session.token)
item_path = os.path.join(path, filename)
print('Creating file at {0}'.format(item_path))
out_file = open(item_path, 'wb')
for block in content_iter:
out_file.write(block) # depends on [control=['for'], data=['block']]
out_file.close()
for callback in session.item_download_callbacks:
if not item:
item = session.communicator.item_get(session.token, item_id) # depends on [control=['if'], data=[]]
callback(session.communicator, session.token, item, item_path) # depends on [control=['for'], data=['callback']] |
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars | def function[set_stop_chars, parameter[self, stop_chars]]:
constant[
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
]
call[name[warnings].warn, parameter[constant[Method set_stop_chars is deprecated, use `set_stop_chars_left` or `set_stop_chars_right` instead], name[DeprecationWarning]]]
name[self]._stop_chars assign[=] call[name[set], parameter[name[stop_chars]]]
name[self]._stop_chars_left assign[=] name[self]._stop_chars
name[self]._stop_chars_right assign[=] name[self]._stop_chars | keyword[def] identifier[set_stop_chars] ( identifier[self] , identifier[stop_chars] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string] , identifier[DeprecationWarning] )
identifier[self] . identifier[_stop_chars] = identifier[set] ( identifier[stop_chars] )
identifier[self] . identifier[_stop_chars_left] = identifier[self] . identifier[_stop_chars]
identifier[self] . identifier[_stop_chars_right] = identifier[self] . identifier[_stop_chars] | def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn('Method set_stop_chars is deprecated, use `set_stop_chars_left` or `set_stop_chars_right` instead', DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars |
def set_state(profile, state, store='local'):
'''
Configure the firewall state.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
.. note::
``notconfigured`` can only be used when using the lgpo store
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Turn the firewall off when the domain profile is active
salt * firewall.set_state domain off
# Turn the firewall on when the public profile is active and set that in
# the local group policy
salt * firewall.set_state public on lgpo
'''
return salt.utils.win_lgpo_netsh.set_state(profile=profile,
state=state,
store=store) | def function[set_state, parameter[profile, state, store]]:
constant[
Configure the firewall state.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
.. note::
``notconfigured`` can only be used when using the lgpo store
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Turn the firewall off when the domain profile is active
salt * firewall.set_state domain off
# Turn the firewall on when the public profile is active and set that in
# the local group policy
salt * firewall.set_state public on lgpo
]
return[call[name[salt].utils.win_lgpo_netsh.set_state, parameter[]]] | keyword[def] identifier[set_state] ( identifier[profile] , identifier[state] , identifier[store] = literal[string] ):
literal[string]
keyword[return] identifier[salt] . identifier[utils] . identifier[win_lgpo_netsh] . identifier[set_state] ( identifier[profile] = identifier[profile] ,
identifier[state] = identifier[state] ,
identifier[store] = identifier[store] ) | def set_state(profile, state, store='local'):
"""
Configure the firewall state.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
state (str):
The firewall state. Valid options are:
- on
- off
- notconfigured
.. note::
``notconfigured`` can only be used when using the lgpo store
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Turn the firewall off when the domain profile is active
salt * firewall.set_state domain off
# Turn the firewall on when the public profile is active and set that in
# the local group policy
salt * firewall.set_state public on lgpo
"""
return salt.utils.win_lgpo_netsh.set_state(profile=profile, state=state, store=store) |
def refine_rectwv_coeff(input_image, rectwv_coeff,
refine_wavecalib_mode,
minimum_slitlet_width_mm,
maximum_slitlet_width_mm,
save_intermediate_results=False,
debugplot=0):
"""Refine RectWaveCoeff object using a catalogue of lines
One and only one among refine_with_oh_lines_mode and
refine_with_arc_lines must be different from zero.
Parameters
----------
input_image : HDUList object
Input 2D image.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
refine_wavecalib_mode : int
Integer, indicating the type of refinement:
0 : no refinement
1 : apply the same global offset to all the slitlets (using ARC lines)
2 : apply individual offset to each slitlet (using ARC lines)
11 : apply the same global offset to all the slitlets (using OH lines)
12 : apply individual offset to each slitlet (using OH lines)
minimum_slitlet_width_mm : float
Minimum slitlet width (mm) for a valid slitlet.
maximum_slitlet_width_mm : float
Maximum slitlet width (mm) for a valid slitlet.
save_intermediate_results : bool
If True, save plots in PDF files
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
refined_rectwv_coeff : RectWaveCoeff instance
Refined rectification and wavelength calibration coefficients
for the particular CSU configuration.
expected_cat_image : HDUList object
Output 2D image with the expected catalogued lines.
"""
logger = logging.getLogger(__name__)
if save_intermediate_results:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages('crosscorrelation.pdf')
else:
pdf = None
# image header
main_header = input_image[0].header
filter_name = main_header['filter']
grism_name = main_header['grism']
# protections
if refine_wavecalib_mode not in [1, 2, 11, 12]:
logger.error('Wavelength calibration refinemente mode={}'. format(
refine_wavecalib_mode
))
raise ValueError("Invalid wavelength calibration refinement mode")
# read tabulated lines
if refine_wavecalib_mode in [1, 2]: # ARC lines
if grism_name == 'LR':
catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat'
else:
catlines_file = 'lines_argon_neon_xenon_empirical.dat'
dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file)
arc_lines_tmpfile = StringIO(dumdata.decode('utf8'))
catlines = np.genfromtxt(arc_lines_tmpfile)
# define wavelength and flux as separate arrays
catlines_all_wave = catlines[:, 0]
catlines_all_flux = catlines[:, 1]
mode = refine_wavecalib_mode
elif refine_wavecalib_mode in [11, 12]: # OH lines
dumdata = pkgutil.get_data(
'emirdrp.instrument.configs',
'Oliva_etal_2013.dat'
)
oh_lines_tmpfile = StringIO(dumdata.decode('utf8'))
catlines = np.genfromtxt(oh_lines_tmpfile)
# define wavelength and flux as separate arrays
catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0]))
catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2]))
mode = refine_wavecalib_mode - 10
else:
raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode))
# initialize output
refined_rectwv_coeff = deepcopy(rectwv_coeff)
logger.info('Computing median spectrum')
# compute median spectrum and normalize it
sp_median = median_slitlets_rectified(
input_image,
mode=2,
minimum_slitlet_width_mm=minimum_slitlet_width_mm,
maximum_slitlet_width_mm=maximum_slitlet_width_mm
)[0].data
sp_median /= sp_median.max()
# determine minimum and maximum useful wavelength
jmin, jmax = find_pix_borders(sp_median, 0)
naxis1 = main_header['naxis1']
naxis2 = main_header['naxis2']
crpix1 = main_header['crpix1']
crval1 = main_header['crval1']
cdelt1 = main_header['cdelt1']
xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1
if grism_name == 'LR':
wv_parameters = set_wv_parameters(filter_name, grism_name)
wave_min = wv_parameters['wvmin_useful']
wave_max = wv_parameters['wvmax_useful']
else:
wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1
wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1
logger.info('Setting wave_min to {}'.format(wave_min))
logger.info('Setting wave_max to {}'.format(wave_max))
# extract subset of catalogue lines within current wavelength range
lok1 = catlines_all_wave >= wave_min
lok2 = catlines_all_wave <= wave_max
catlines_reference_wave = catlines_all_wave[lok1*lok2]
catlines_reference_flux = catlines_all_flux[lok1*lok2]
catlines_reference_flux /= catlines_reference_flux.max()
# estimate sigma to broaden catalogue lines
csu_config = CsuConfiguration.define_from_header(main_header)
# segregate slitlets
list_useful_slitlets = csu_config.widths_in_range_mm(
minwidth=minimum_slitlet_width_mm,
maxwidth=maximum_slitlet_width_mm
)
# remove missing slitlets
if len(refined_rectwv_coeff.missing_slitlets) > 0:
for iremove in refined_rectwv_coeff.missing_slitlets:
if iremove in list_useful_slitlets:
list_useful_slitlets.remove(iremove)
list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1))
if i not in list_useful_slitlets]
logger.info('list of useful slitlets: {}'.format(
list_useful_slitlets))
logger.info('list of not useful slitlets: {}'.format(
list_not_useful_slitlets))
tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet)
for islitlet in list_useful_slitlets])
widths_summary = summary(tempwidths)
logger.info('Statistics of useful slitlet widths (mm):')
logger.info('- npoints....: {0:d}'.format(widths_summary['npoints']))
logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean']))
logger.info('- median.....: {0:7.3f}'.format(widths_summary['median']))
logger.info('- std........: {0:7.3f}'.format(widths_summary['std']))
logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std']))
# empirical transformation of slit width (mm) to pixels
sigma_broadening = cdelt1 * widths_summary['median']
# convolve location of catalogue lines to generate expected spectrum
xwave_reference, sp_reference = convolve_comb_lines(
catlines_reference_wave, catlines_reference_flux, sigma_broadening,
crpix1, crval1, cdelt1, naxis1
)
sp_reference /= sp_reference.max()
# generate image2d with expected lines
image2d_expected_lines = np.tile(sp_reference, (naxis2, 1))
hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header)
expected_cat_image = fits.HDUList([hdu])
if (abs(debugplot) % 10 != 0) or (pdf is not None):
ax = ximplotxy(xwave, sp_median, 'C1-',
xlabel='Wavelength (Angstroms, in vacuum)',
ylabel='Normalized number of counts',
title='Median spectrum',
label='observed spectrum', show=False)
# overplot reference catalogue lines
ax.stem(catlines_reference_wave, catlines_reference_flux, 'C4-',
markerfmt=' ', basefmt='C4-', label='tabulated lines')
# overplot convolved reference lines
ax.plot(xwave_reference, sp_reference, 'C0-',
label='expected spectrum')
ax.legend()
if pdf is not None:
pdf.savefig()
else:
pause_debugplot(debugplot=debugplot, pltshow=True)
# compute baseline signal in sp_median
baseline = np.percentile(sp_median[sp_median > 0], q=10)
if (abs(debugplot) % 10 != 0) or (pdf is not None):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(sp_median, bins=1000, log=True)
ax.set_xlabel('Normalized number of counts')
ax.set_ylabel('Number of pixels')
ax.set_title('Median spectrum')
ax.axvline(float(baseline), linestyle='--', color='grey')
if pdf is not None:
pdf.savefig()
else:
geometry = (0, 0, 640, 480)
set_window_geometry(geometry)
plt.show()
# subtract baseline to sp_median (only pixels with signal above zero)
lok = np.where(sp_median > 0)
sp_median[lok] -= baseline
# compute global offset through periodic correlation
logger.info('Computing global offset')
global_offset, fpeak = periodic_corr1d(
sp_reference=sp_reference,
sp_offset=sp_median,
fminmax=None,
naround_zero=50,
plottitle='Median spectrum (cross-correlation)',
pdf=pdf,
debugplot=debugplot
)
logger.info('Global offset: {} pixels'.format(-global_offset))
missing_slitlets = rectwv_coeff.missing_slitlets
if mode == 1:
# apply computed offset to obtain refined_rectwv_coeff_global
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet not in missing_slitlets:
i = islitlet - 1
dumdict = refined_rectwv_coeff.contents[i]
dumdict['wpoly_coeff'][0] -= global_offset*cdelt1
elif mode == 2:
# compute individual offset for each slitlet
logger.info('Computing individual offsets')
median_55sp = median_slitlets_rectified(input_image, mode=1)
offset_array = np.zeros(EMIR_NBARS)
xplot = []
yplot = []
xplot_skipped = []
yplot_skipped = []
cout = '0'
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet in list_useful_slitlets:
i = islitlet - 1
sp_median = median_55sp[0].data[i, :]
lok = np.where(sp_median > 0)
if np.any(lok):
baseline = np.percentile(sp_median[lok], q=10)
sp_median[lok] -= baseline
sp_median /= sp_median.max()
offset_array[i], fpeak = periodic_corr1d(
sp_reference=sp_reference,
sp_offset=median_55sp[0].data[i, :],
fminmax=None,
naround_zero=50,
plottitle='slitlet #{0} (cross-correlation)'.format(
islitlet),
pdf=pdf,
debugplot=debugplot
)
else:
offset_array[i] = 0.0
dumdict = refined_rectwv_coeff.contents[i]
dumdict['wpoly_coeff'][0] -= offset_array[i]*cdelt1
xplot.append(islitlet)
yplot.append(-offset_array[i])
# second correction
wpoly_coeff_refined = check_wlcalib_sp(
sp=median_55sp[0].data[i, :],
crpix1=crpix1,
crval1=crval1-offset_array[i]*cdelt1,
cdelt1=cdelt1,
wv_master=catlines_reference_wave,
coeff_ini=dumdict['wpoly_coeff'],
naxis1_ini=EMIR_NAXIS1,
title='slitlet #{0} (after applying offset)'.format(
islitlet),
ylogscale=False,
pdf=pdf,
debugplot=debugplot
)
dumdict['wpoly_coeff'] = wpoly_coeff_refined
cout += '.'
else:
xplot_skipped.append(islitlet)
yplot_skipped.append(0)
cout += 'i'
if islitlet % 10 == 0:
if cout != 'i':
cout = str(islitlet // 10)
logger.info(cout)
# show offsets with opposite sign
stat_summary = summary(np.array(yplot))
logger.info('Statistics of individual slitlet offsets (pixels):')
logger.info('- npoints....: {0:d}'.format(stat_summary['npoints']))
logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean']))
logger.info('- median.....: {0:7.3f}'.format(stat_summary['median']))
logger.info('- std........: {0:7.3f}'.format(stat_summary['std']))
logger.info('- robust_std.: {0:7.3f}'.format(stat_summary[
'robust_std']))
if (abs(debugplot) % 10 != 0) or (pdf is not None):
ax = ximplotxy(xplot, yplot,
linestyle='', marker='o', color='C0',
xlabel='slitlet number',
ylabel='-offset (pixels) = offset to be applied',
title='cross-correlation result',
show=False, **{'label': 'individual slitlets'})
if len(xplot_skipped) > 0:
ax.plot(xplot_skipped, yplot_skipped, 'mx')
ax.axhline(-global_offset, linestyle='--', color='C1',
label='global offset')
ax.legend()
if pdf is not None:
pdf.savefig()
else:
pause_debugplot(debugplot=debugplot, pltshow=True)
else:
raise ValueError('Unexpected mode={}'.format(mode))
# close output PDF file
if pdf is not None:
pdf.close()
# return result
return refined_rectwv_coeff, expected_cat_image | def function[refine_rectwv_coeff, parameter[input_image, rectwv_coeff, refine_wavecalib_mode, minimum_slitlet_width_mm, maximum_slitlet_width_mm, save_intermediate_results, debugplot]]:
constant[Refine RectWaveCoeff object using a catalogue of lines
One and only one among refine_with_oh_lines_mode and
refine_with_arc_lines must be different from zero.
Parameters
----------
input_image : HDUList object
Input 2D image.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
refine_wavecalib_mode : int
Integer, indicating the type of refinement:
0 : no refinement
1 : apply the same global offset to all the slitlets (using ARC lines)
2 : apply individual offset to each slitlet (using ARC lines)
11 : apply the same global offset to all the slitlets (using OH lines)
12 : apply individual offset to each slitlet (using OH lines)
minimum_slitlet_width_mm : float
Minimum slitlet width (mm) for a valid slitlet.
maximum_slitlet_width_mm : float
Maximum slitlet width (mm) for a valid slitlet.
save_intermediate_results : bool
If True, save plots in PDF files
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
refined_rectwv_coeff : RectWaveCoeff instance
Refined rectification and wavelength calibration coefficients
for the particular CSU configuration.
expected_cat_image : HDUList object
Output 2D image with the expected catalogued lines.
]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
if name[save_intermediate_results] begin[:]
from relative_module[matplotlib.backends.backend_pdf] import module[PdfPages]
variable[pdf] assign[=] call[name[PdfPages], parameter[constant[crosscorrelation.pdf]]]
variable[main_header] assign[=] call[name[input_image]][constant[0]].header
variable[filter_name] assign[=] call[name[main_header]][constant[filter]]
variable[grism_name] assign[=] call[name[main_header]][constant[grism]]
if compare[name[refine_wavecalib_mode] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da204621060>, <ast.Constant object at 0x7da2046217b0>, <ast.Constant object at 0x7da2046201c0>, <ast.Constant object at 0x7da204622380>]]] begin[:]
call[name[logger].error, parameter[call[constant[Wavelength calibration refinemente mode={}].format, parameter[name[refine_wavecalib_mode]]]]]
<ast.Raise object at 0x7da204621ba0>
if compare[name[refine_wavecalib_mode] in list[[<ast.Constant object at 0x7da2046229e0>, <ast.Constant object at 0x7da204621f30>]]] begin[:]
if compare[name[grism_name] equal[==] constant[LR]] begin[:]
variable[catlines_file] assign[=] constant[lines_argon_neon_xenon_empirical_LR.dat]
variable[dumdata] assign[=] call[name[pkgutil].get_data, parameter[constant[emirdrp.instrument.configs], name[catlines_file]]]
variable[arc_lines_tmpfile] assign[=] call[name[StringIO], parameter[call[name[dumdata].decode, parameter[constant[utf8]]]]]
variable[catlines] assign[=] call[name[np].genfromtxt, parameter[name[arc_lines_tmpfile]]]
variable[catlines_all_wave] assign[=] call[name[catlines]][tuple[[<ast.Slice object at 0x7da2044c0670>, <ast.Constant object at 0x7da2044c1e10>]]]
variable[catlines_all_flux] assign[=] call[name[catlines]][tuple[[<ast.Slice object at 0x7da2044c1a20>, <ast.Constant object at 0x7da2044c3c40>]]]
variable[mode] assign[=] name[refine_wavecalib_mode]
variable[refined_rectwv_coeff] assign[=] call[name[deepcopy], parameter[name[rectwv_coeff]]]
call[name[logger].info, parameter[constant[Computing median spectrum]]]
variable[sp_median] assign[=] call[call[name[median_slitlets_rectified], parameter[name[input_image]]]][constant[0]].data
<ast.AugAssign object at 0x7da2044c3a90>
<ast.Tuple object at 0x7da2044c13f0> assign[=] call[name[find_pix_borders], parameter[name[sp_median], constant[0]]]
variable[naxis1] assign[=] call[name[main_header]][constant[naxis1]]
variable[naxis2] assign[=] call[name[main_header]][constant[naxis2]]
variable[crpix1] assign[=] call[name[main_header]][constant[crpix1]]
variable[crval1] assign[=] call[name[main_header]][constant[crval1]]
variable[cdelt1] assign[=] call[name[main_header]][constant[cdelt1]]
variable[xwave] assign[=] binary_operation[name[crval1] + binary_operation[binary_operation[binary_operation[call[name[np].arange, parameter[name[naxis1]]] + constant[1.0]] - name[crpix1]] * name[cdelt1]]]
if compare[name[grism_name] equal[==] constant[LR]] begin[:]
variable[wv_parameters] assign[=] call[name[set_wv_parameters], parameter[name[filter_name], name[grism_name]]]
variable[wave_min] assign[=] call[name[wv_parameters]][constant[wvmin_useful]]
variable[wave_max] assign[=] call[name[wv_parameters]][constant[wvmax_useful]]
call[name[logger].info, parameter[call[constant[Setting wave_min to {}].format, parameter[name[wave_min]]]]]
call[name[logger].info, parameter[call[constant[Setting wave_max to {}].format, parameter[name[wave_max]]]]]
variable[lok1] assign[=] compare[name[catlines_all_wave] greater_or_equal[>=] name[wave_min]]
variable[lok2] assign[=] compare[name[catlines_all_wave] less_or_equal[<=] name[wave_max]]
variable[catlines_reference_wave] assign[=] call[name[catlines_all_wave]][binary_operation[name[lok1] * name[lok2]]]
variable[catlines_reference_flux] assign[=] call[name[catlines_all_flux]][binary_operation[name[lok1] * name[lok2]]]
<ast.AugAssign object at 0x7da20c6e6980>
variable[csu_config] assign[=] call[name[CsuConfiguration].define_from_header, parameter[name[main_header]]]
variable[list_useful_slitlets] assign[=] call[name[csu_config].widths_in_range_mm, parameter[]]
if compare[call[name[len], parameter[name[refined_rectwv_coeff].missing_slitlets]] greater[>] constant[0]] begin[:]
for taget[name[iremove]] in starred[name[refined_rectwv_coeff].missing_slitlets] begin[:]
if compare[name[iremove] in name[list_useful_slitlets]] begin[:]
call[name[list_useful_slitlets].remove, parameter[name[iremove]]]
variable[list_not_useful_slitlets] assign[=] <ast.ListComp object at 0x7da20c6e4d00>
call[name[logger].info, parameter[call[constant[list of useful slitlets: {}].format, parameter[name[list_useful_slitlets]]]]]
call[name[logger].info, parameter[call[constant[list of not useful slitlets: {}].format, parameter[name[list_not_useful_slitlets]]]]]
variable[tempwidths] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da20c6e75b0>]]
variable[widths_summary] assign[=] call[name[summary], parameter[name[tempwidths]]]
call[name[logger].info, parameter[constant[Statistics of useful slitlet widths (mm):]]]
call[name[logger].info, parameter[call[constant[- npoints....: {0:d}].format, parameter[call[name[widths_summary]][constant[npoints]]]]]]
call[name[logger].info, parameter[call[constant[- mean.......: {0:7.3f}].format, parameter[call[name[widths_summary]][constant[mean]]]]]]
call[name[logger].info, parameter[call[constant[- median.....: {0:7.3f}].format, parameter[call[name[widths_summary]][constant[median]]]]]]
call[name[logger].info, parameter[call[constant[- std........: {0:7.3f}].format, parameter[call[name[widths_summary]][constant[std]]]]]]
call[name[logger].info, parameter[call[constant[- robust_std.: {0:7.3f}].format, parameter[call[name[widths_summary]][constant[robust_std]]]]]]
variable[sigma_broadening] assign[=] binary_operation[name[cdelt1] * call[name[widths_summary]][constant[median]]]
<ast.Tuple object at 0x7da20c7c8190> assign[=] call[name[convolve_comb_lines], parameter[name[catlines_reference_wave], name[catlines_reference_flux], name[sigma_broadening], name[crpix1], name[crval1], name[cdelt1], name[naxis1]]]
<ast.AugAssign object at 0x7da20c7ca110>
variable[image2d_expected_lines] assign[=] call[name[np].tile, parameter[name[sp_reference], tuple[[<ast.Name object at 0x7da20c7c9a50>, <ast.Constant object at 0x7da20c7cbb20>]]]]
variable[hdu] assign[=] call[name[fits].PrimaryHDU, parameter[]]
variable[expected_cat_image] assign[=] call[name[fits].HDUList, parameter[list[[<ast.Name object at 0x7da20c7c8100>]]]]
if <ast.BoolOp object at 0x7da20c7c8be0> begin[:]
variable[ax] assign[=] call[name[ximplotxy], parameter[name[xwave], name[sp_median], constant[C1-]]]
call[name[ax].stem, parameter[name[catlines_reference_wave], name[catlines_reference_flux], constant[C4-]]]
call[name[ax].plot, parameter[name[xwave_reference], name[sp_reference], constant[C0-]]]
call[name[ax].legend, parameter[]]
if compare[name[pdf] is_not constant[None]] begin[:]
call[name[pdf].savefig, parameter[]]
variable[baseline] assign[=] call[name[np].percentile, parameter[call[name[sp_median]][compare[name[sp_median] greater[>] constant[0]]]]]
if <ast.BoolOp object at 0x7da20c7c9d80> begin[:]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[fig].add_subplot, parameter[constant[111]]]
call[name[ax].hist, parameter[name[sp_median]]]
call[name[ax].set_xlabel, parameter[constant[Normalized number of counts]]]
call[name[ax].set_ylabel, parameter[constant[Number of pixels]]]
call[name[ax].set_title, parameter[constant[Median spectrum]]]
call[name[ax].axvline, parameter[call[name[float], parameter[name[baseline]]]]]
if compare[name[pdf] is_not constant[None]] begin[:]
call[name[pdf].savefig, parameter[]]
variable[lok] assign[=] call[name[np].where, parameter[compare[name[sp_median] greater[>] constant[0]]]]
<ast.AugAssign object at 0x7da20c7cb0a0>
call[name[logger].info, parameter[constant[Computing global offset]]]
<ast.Tuple object at 0x7da20c7cbca0> assign[=] call[name[periodic_corr1d], parameter[]]
call[name[logger].info, parameter[call[constant[Global offset: {} pixels].format, parameter[<ast.UnaryOp object at 0x7da18f09c550>]]]]
variable[missing_slitlets] assign[=] name[rectwv_coeff].missing_slitlets
if compare[name[mode] equal[==] constant[1]] begin[:]
for taget[name[islitlet]] in starred[call[name[range], parameter[constant[1], binary_operation[name[EMIR_NBARS] + constant[1]]]]] begin[:]
if compare[name[islitlet] <ast.NotIn object at 0x7da2590d7190> name[missing_slitlets]] begin[:]
variable[i] assign[=] binary_operation[name[islitlet] - constant[1]]
variable[dumdict] assign[=] call[name[refined_rectwv_coeff].contents][name[i]]
<ast.AugAssign object at 0x7da18f09f4f0>
if compare[name[pdf] is_not constant[None]] begin[:]
call[name[pdf].close, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b1a746a0>, <ast.Name object at 0x7da1b1a76620>]]] | keyword[def] identifier[refine_rectwv_coeff] ( identifier[input_image] , identifier[rectwv_coeff] ,
identifier[refine_wavecalib_mode] ,
identifier[minimum_slitlet_width_mm] ,
identifier[maximum_slitlet_width_mm] ,
identifier[save_intermediate_results] = keyword[False] ,
identifier[debugplot] = literal[int] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
keyword[if] identifier[save_intermediate_results] :
keyword[from] identifier[matplotlib] . identifier[backends] . identifier[backend_pdf] keyword[import] identifier[PdfPages]
identifier[pdf] = identifier[PdfPages] ( literal[string] )
keyword[else] :
identifier[pdf] = keyword[None]
identifier[main_header] = identifier[input_image] [ literal[int] ]. identifier[header]
identifier[filter_name] = identifier[main_header] [ literal[string] ]
identifier[grism_name] = identifier[main_header] [ literal[string] ]
keyword[if] identifier[refine_wavecalib_mode] keyword[not] keyword[in] [ literal[int] , literal[int] , literal[int] , literal[int] ]:
identifier[logger] . identifier[error] ( literal[string] . identifier[format] (
identifier[refine_wavecalib_mode]
))
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[refine_wavecalib_mode] keyword[in] [ literal[int] , literal[int] ]:
keyword[if] identifier[grism_name] == literal[string] :
identifier[catlines_file] = literal[string]
keyword[else] :
identifier[catlines_file] = literal[string]
identifier[dumdata] = identifier[pkgutil] . identifier[get_data] ( literal[string] , identifier[catlines_file] )
identifier[arc_lines_tmpfile] = identifier[StringIO] ( identifier[dumdata] . identifier[decode] ( literal[string] ))
identifier[catlines] = identifier[np] . identifier[genfromtxt] ( identifier[arc_lines_tmpfile] )
identifier[catlines_all_wave] = identifier[catlines] [:, literal[int] ]
identifier[catlines_all_flux] = identifier[catlines] [:, literal[int] ]
identifier[mode] = identifier[refine_wavecalib_mode]
keyword[elif] identifier[refine_wavecalib_mode] keyword[in] [ literal[int] , literal[int] ]:
identifier[dumdata] = identifier[pkgutil] . identifier[get_data] (
literal[string] ,
literal[string]
)
identifier[oh_lines_tmpfile] = identifier[StringIO] ( identifier[dumdata] . identifier[decode] ( literal[string] ))
identifier[catlines] = identifier[np] . identifier[genfromtxt] ( identifier[oh_lines_tmpfile] )
identifier[catlines_all_wave] = identifier[np] . identifier[concatenate] (( identifier[catlines] [:, literal[int] ], identifier[catlines] [:, literal[int] ]))
identifier[catlines_all_flux] = identifier[np] . identifier[concatenate] (( identifier[catlines] [:, literal[int] ], identifier[catlines] [:, literal[int] ]))
identifier[mode] = identifier[refine_wavecalib_mode] - literal[int]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[refine_wavecalib_mode] ))
identifier[refined_rectwv_coeff] = identifier[deepcopy] ( identifier[rectwv_coeff] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[sp_median] = identifier[median_slitlets_rectified] (
identifier[input_image] ,
identifier[mode] = literal[int] ,
identifier[minimum_slitlet_width_mm] = identifier[minimum_slitlet_width_mm] ,
identifier[maximum_slitlet_width_mm] = identifier[maximum_slitlet_width_mm]
)[ literal[int] ]. identifier[data]
identifier[sp_median] /= identifier[sp_median] . identifier[max] ()
identifier[jmin] , identifier[jmax] = identifier[find_pix_borders] ( identifier[sp_median] , literal[int] )
identifier[naxis1] = identifier[main_header] [ literal[string] ]
identifier[naxis2] = identifier[main_header] [ literal[string] ]
identifier[crpix1] = identifier[main_header] [ literal[string] ]
identifier[crval1] = identifier[main_header] [ literal[string] ]
identifier[cdelt1] = identifier[main_header] [ literal[string] ]
identifier[xwave] = identifier[crval1] +( identifier[np] . identifier[arange] ( identifier[naxis1] )+ literal[int] - identifier[crpix1] )* identifier[cdelt1]
keyword[if] identifier[grism_name] == literal[string] :
identifier[wv_parameters] = identifier[set_wv_parameters] ( identifier[filter_name] , identifier[grism_name] )
identifier[wave_min] = identifier[wv_parameters] [ literal[string] ]
identifier[wave_max] = identifier[wv_parameters] [ literal[string] ]
keyword[else] :
identifier[wave_min] = identifier[crval1] +( identifier[jmin] + literal[int] - identifier[crpix1] )* identifier[cdelt1]
identifier[wave_max] = identifier[crval1] +( identifier[jmax] + literal[int] - identifier[crpix1] )* identifier[cdelt1]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[wave_min] ))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[wave_max] ))
identifier[lok1] = identifier[catlines_all_wave] >= identifier[wave_min]
identifier[lok2] = identifier[catlines_all_wave] <= identifier[wave_max]
identifier[catlines_reference_wave] = identifier[catlines_all_wave] [ identifier[lok1] * identifier[lok2] ]
identifier[catlines_reference_flux] = identifier[catlines_all_flux] [ identifier[lok1] * identifier[lok2] ]
identifier[catlines_reference_flux] /= identifier[catlines_reference_flux] . identifier[max] ()
identifier[csu_config] = identifier[CsuConfiguration] . identifier[define_from_header] ( identifier[main_header] )
identifier[list_useful_slitlets] = identifier[csu_config] . identifier[widths_in_range_mm] (
identifier[minwidth] = identifier[minimum_slitlet_width_mm] ,
identifier[maxwidth] = identifier[maximum_slitlet_width_mm]
)
keyword[if] identifier[len] ( identifier[refined_rectwv_coeff] . identifier[missing_slitlets] )> literal[int] :
keyword[for] identifier[iremove] keyword[in] identifier[refined_rectwv_coeff] . identifier[missing_slitlets] :
keyword[if] identifier[iremove] keyword[in] identifier[list_useful_slitlets] :
identifier[list_useful_slitlets] . identifier[remove] ( identifier[iremove] )
identifier[list_not_useful_slitlets] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[list] ( identifier[range] ( literal[int] , identifier[EMIR_NBARS] + literal[int] ))
keyword[if] identifier[i] keyword[not] keyword[in] identifier[list_useful_slitlets] ]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (
identifier[list_useful_slitlets] ))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (
identifier[list_not_useful_slitlets] ))
identifier[tempwidths] = identifier[np] . identifier[array] ([ identifier[csu_config] . identifier[csu_bar_slit_width] ( identifier[islitlet] )
keyword[for] identifier[islitlet] keyword[in] identifier[list_useful_slitlets] ])
identifier[widths_summary] = identifier[summary] ( identifier[tempwidths] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[widths_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[widths_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[widths_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[widths_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[widths_summary] [ literal[string] ]))
identifier[sigma_broadening] = identifier[cdelt1] * identifier[widths_summary] [ literal[string] ]
identifier[xwave_reference] , identifier[sp_reference] = identifier[convolve_comb_lines] (
identifier[catlines_reference_wave] , identifier[catlines_reference_flux] , identifier[sigma_broadening] ,
identifier[crpix1] , identifier[crval1] , identifier[cdelt1] , identifier[naxis1]
)
identifier[sp_reference] /= identifier[sp_reference] . identifier[max] ()
identifier[image2d_expected_lines] = identifier[np] . identifier[tile] ( identifier[sp_reference] ,( identifier[naxis2] , literal[int] ))
identifier[hdu] = identifier[fits] . identifier[PrimaryHDU] ( identifier[data] = identifier[image2d_expected_lines] , identifier[header] = identifier[main_header] )
identifier[expected_cat_image] = identifier[fits] . identifier[HDUList] ([ identifier[hdu] ])
keyword[if] ( identifier[abs] ( identifier[debugplot] )% literal[int] != literal[int] ) keyword[or] ( identifier[pdf] keyword[is] keyword[not] keyword[None] ):
identifier[ax] = identifier[ximplotxy] ( identifier[xwave] , identifier[sp_median] , literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[title] = literal[string] ,
identifier[label] = literal[string] , identifier[show] = keyword[False] )
identifier[ax] . identifier[stem] ( identifier[catlines_reference_wave] , identifier[catlines_reference_flux] , literal[string] ,
identifier[markerfmt] = literal[string] , identifier[basefmt] = literal[string] , identifier[label] = literal[string] )
identifier[ax] . identifier[plot] ( identifier[xwave_reference] , identifier[sp_reference] , literal[string] ,
identifier[label] = literal[string] )
identifier[ax] . identifier[legend] ()
keyword[if] identifier[pdf] keyword[is] keyword[not] keyword[None] :
identifier[pdf] . identifier[savefig] ()
keyword[else] :
identifier[pause_debugplot] ( identifier[debugplot] = identifier[debugplot] , identifier[pltshow] = keyword[True] )
identifier[baseline] = identifier[np] . identifier[percentile] ( identifier[sp_median] [ identifier[sp_median] > literal[int] ], identifier[q] = literal[int] )
keyword[if] ( identifier[abs] ( identifier[debugplot] )% literal[int] != literal[int] ) keyword[or] ( identifier[pdf] keyword[is] keyword[not] keyword[None] ):
identifier[fig] = identifier[plt] . identifier[figure] ()
identifier[ax] = identifier[fig] . identifier[add_subplot] ( literal[int] )
identifier[ax] . identifier[hist] ( identifier[sp_median] , identifier[bins] = literal[int] , identifier[log] = keyword[True] )
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_title] ( literal[string] )
identifier[ax] . identifier[axvline] ( identifier[float] ( identifier[baseline] ), identifier[linestyle] = literal[string] , identifier[color] = literal[string] )
keyword[if] identifier[pdf] keyword[is] keyword[not] keyword[None] :
identifier[pdf] . identifier[savefig] ()
keyword[else] :
identifier[geometry] =( literal[int] , literal[int] , literal[int] , literal[int] )
identifier[set_window_geometry] ( identifier[geometry] )
identifier[plt] . identifier[show] ()
identifier[lok] = identifier[np] . identifier[where] ( identifier[sp_median] > literal[int] )
identifier[sp_median] [ identifier[lok] ]-= identifier[baseline]
identifier[logger] . identifier[info] ( literal[string] )
identifier[global_offset] , identifier[fpeak] = identifier[periodic_corr1d] (
identifier[sp_reference] = identifier[sp_reference] ,
identifier[sp_offset] = identifier[sp_median] ,
identifier[fminmax] = keyword[None] ,
identifier[naround_zero] = literal[int] ,
identifier[plottitle] = literal[string] ,
identifier[pdf] = identifier[pdf] ,
identifier[debugplot] = identifier[debugplot]
)
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (- identifier[global_offset] ))
identifier[missing_slitlets] = identifier[rectwv_coeff] . identifier[missing_slitlets]
keyword[if] identifier[mode] == literal[int] :
keyword[for] identifier[islitlet] keyword[in] identifier[range] ( literal[int] , identifier[EMIR_NBARS] + literal[int] ):
keyword[if] identifier[islitlet] keyword[not] keyword[in] identifier[missing_slitlets] :
identifier[i] = identifier[islitlet] - literal[int]
identifier[dumdict] = identifier[refined_rectwv_coeff] . identifier[contents] [ identifier[i] ]
identifier[dumdict] [ literal[string] ][ literal[int] ]-= identifier[global_offset] * identifier[cdelt1]
keyword[elif] identifier[mode] == literal[int] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[median_55sp] = identifier[median_slitlets_rectified] ( identifier[input_image] , identifier[mode] = literal[int] )
identifier[offset_array] = identifier[np] . identifier[zeros] ( identifier[EMIR_NBARS] )
identifier[xplot] =[]
identifier[yplot] =[]
identifier[xplot_skipped] =[]
identifier[yplot_skipped] =[]
identifier[cout] = literal[string]
keyword[for] identifier[islitlet] keyword[in] identifier[range] ( literal[int] , identifier[EMIR_NBARS] + literal[int] ):
keyword[if] identifier[islitlet] keyword[in] identifier[list_useful_slitlets] :
identifier[i] = identifier[islitlet] - literal[int]
identifier[sp_median] = identifier[median_55sp] [ literal[int] ]. identifier[data] [ identifier[i] ,:]
identifier[lok] = identifier[np] . identifier[where] ( identifier[sp_median] > literal[int] )
keyword[if] identifier[np] . identifier[any] ( identifier[lok] ):
identifier[baseline] = identifier[np] . identifier[percentile] ( identifier[sp_median] [ identifier[lok] ], identifier[q] = literal[int] )
identifier[sp_median] [ identifier[lok] ]-= identifier[baseline]
identifier[sp_median] /= identifier[sp_median] . identifier[max] ()
identifier[offset_array] [ identifier[i] ], identifier[fpeak] = identifier[periodic_corr1d] (
identifier[sp_reference] = identifier[sp_reference] ,
identifier[sp_offset] = identifier[median_55sp] [ literal[int] ]. identifier[data] [ identifier[i] ,:],
identifier[fminmax] = keyword[None] ,
identifier[naround_zero] = literal[int] ,
identifier[plottitle] = literal[string] . identifier[format] (
identifier[islitlet] ),
identifier[pdf] = identifier[pdf] ,
identifier[debugplot] = identifier[debugplot]
)
keyword[else] :
identifier[offset_array] [ identifier[i] ]= literal[int]
identifier[dumdict] = identifier[refined_rectwv_coeff] . identifier[contents] [ identifier[i] ]
identifier[dumdict] [ literal[string] ][ literal[int] ]-= identifier[offset_array] [ identifier[i] ]* identifier[cdelt1]
identifier[xplot] . identifier[append] ( identifier[islitlet] )
identifier[yplot] . identifier[append] (- identifier[offset_array] [ identifier[i] ])
identifier[wpoly_coeff_refined] = identifier[check_wlcalib_sp] (
identifier[sp] = identifier[median_55sp] [ literal[int] ]. identifier[data] [ identifier[i] ,:],
identifier[crpix1] = identifier[crpix1] ,
identifier[crval1] = identifier[crval1] - identifier[offset_array] [ identifier[i] ]* identifier[cdelt1] ,
identifier[cdelt1] = identifier[cdelt1] ,
identifier[wv_master] = identifier[catlines_reference_wave] ,
identifier[coeff_ini] = identifier[dumdict] [ literal[string] ],
identifier[naxis1_ini] = identifier[EMIR_NAXIS1] ,
identifier[title] = literal[string] . identifier[format] (
identifier[islitlet] ),
identifier[ylogscale] = keyword[False] ,
identifier[pdf] = identifier[pdf] ,
identifier[debugplot] = identifier[debugplot]
)
identifier[dumdict] [ literal[string] ]= identifier[wpoly_coeff_refined]
identifier[cout] += literal[string]
keyword[else] :
identifier[xplot_skipped] . identifier[append] ( identifier[islitlet] )
identifier[yplot_skipped] . identifier[append] ( literal[int] )
identifier[cout] += literal[string]
keyword[if] identifier[islitlet] % literal[int] == literal[int] :
keyword[if] identifier[cout] != literal[string] :
identifier[cout] = identifier[str] ( identifier[islitlet] // literal[int] )
identifier[logger] . identifier[info] ( identifier[cout] )
identifier[stat_summary] = identifier[summary] ( identifier[np] . identifier[array] ( identifier[yplot] ))
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[stat_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[stat_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[stat_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[stat_summary] [ literal[string] ]))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[stat_summary] [
literal[string] ]))
keyword[if] ( identifier[abs] ( identifier[debugplot] )% literal[int] != literal[int] ) keyword[or] ( identifier[pdf] keyword[is] keyword[not] keyword[None] ):
identifier[ax] = identifier[ximplotxy] ( identifier[xplot] , identifier[yplot] ,
identifier[linestyle] = literal[string] , identifier[marker] = literal[string] , identifier[color] = literal[string] ,
identifier[xlabel] = literal[string] ,
identifier[ylabel] = literal[string] ,
identifier[title] = literal[string] ,
identifier[show] = keyword[False] ,**{ literal[string] : literal[string] })
keyword[if] identifier[len] ( identifier[xplot_skipped] )> literal[int] :
identifier[ax] . identifier[plot] ( identifier[xplot_skipped] , identifier[yplot_skipped] , literal[string] )
identifier[ax] . identifier[axhline] (- identifier[global_offset] , identifier[linestyle] = literal[string] , identifier[color] = literal[string] ,
identifier[label] = literal[string] )
identifier[ax] . identifier[legend] ()
keyword[if] identifier[pdf] keyword[is] keyword[not] keyword[None] :
identifier[pdf] . identifier[savefig] ()
keyword[else] :
identifier[pause_debugplot] ( identifier[debugplot] = identifier[debugplot] , identifier[pltshow] = keyword[True] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[mode] ))
keyword[if] identifier[pdf] keyword[is] keyword[not] keyword[None] :
identifier[pdf] . identifier[close] ()
keyword[return] identifier[refined_rectwv_coeff] , identifier[expected_cat_image] | def refine_rectwv_coeff(input_image, rectwv_coeff, refine_wavecalib_mode, minimum_slitlet_width_mm, maximum_slitlet_width_mm, save_intermediate_results=False, debugplot=0):
"""Refine RectWaveCoeff object using a catalogue of lines
One and only one among refine_with_oh_lines_mode and
refine_with_arc_lines must be different from zero.
Parameters
----------
input_image : HDUList object
Input 2D image.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
refine_wavecalib_mode : int
Integer, indicating the type of refinement:
0 : no refinement
1 : apply the same global offset to all the slitlets (using ARC lines)
2 : apply individual offset to each slitlet (using ARC lines)
11 : apply the same global offset to all the slitlets (using OH lines)
12 : apply individual offset to each slitlet (using OH lines)
minimum_slitlet_width_mm : float
Minimum slitlet width (mm) for a valid slitlet.
maximum_slitlet_width_mm : float
Maximum slitlet width (mm) for a valid slitlet.
save_intermediate_results : bool
If True, save plots in PDF files
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
refined_rectwv_coeff : RectWaveCoeff instance
Refined rectification and wavelength calibration coefficients
for the particular CSU configuration.
expected_cat_image : HDUList object
Output 2D image with the expected catalogued lines.
"""
logger = logging.getLogger(__name__)
if save_intermediate_results:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages('crosscorrelation.pdf') # depends on [control=['if'], data=[]]
else:
pdf = None
# image header
main_header = input_image[0].header
filter_name = main_header['filter']
grism_name = main_header['grism']
# protections
if refine_wavecalib_mode not in [1, 2, 11, 12]:
logger.error('Wavelength calibration refinemente mode={}'.format(refine_wavecalib_mode))
raise ValueError('Invalid wavelength calibration refinement mode') # depends on [control=['if'], data=['refine_wavecalib_mode']]
# read tabulated lines
if refine_wavecalib_mode in [1, 2]: # ARC lines
if grism_name == 'LR':
catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat' # depends on [control=['if'], data=[]]
else:
catlines_file = 'lines_argon_neon_xenon_empirical.dat'
dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file)
arc_lines_tmpfile = StringIO(dumdata.decode('utf8'))
catlines = np.genfromtxt(arc_lines_tmpfile)
# define wavelength and flux as separate arrays
catlines_all_wave = catlines[:, 0]
catlines_all_flux = catlines[:, 1]
mode = refine_wavecalib_mode # depends on [control=['if'], data=['refine_wavecalib_mode']]
elif refine_wavecalib_mode in [11, 12]: # OH lines
dumdata = pkgutil.get_data('emirdrp.instrument.configs', 'Oliva_etal_2013.dat')
oh_lines_tmpfile = StringIO(dumdata.decode('utf8'))
catlines = np.genfromtxt(oh_lines_tmpfile)
# define wavelength and flux as separate arrays
catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0]))
catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2]))
mode = refine_wavecalib_mode - 10 # depends on [control=['if'], data=['refine_wavecalib_mode']]
else:
raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode))
# initialize output
refined_rectwv_coeff = deepcopy(rectwv_coeff)
logger.info('Computing median spectrum')
# compute median spectrum and normalize it
sp_median = median_slitlets_rectified(input_image, mode=2, minimum_slitlet_width_mm=minimum_slitlet_width_mm, maximum_slitlet_width_mm=maximum_slitlet_width_mm)[0].data
sp_median /= sp_median.max()
# determine minimum and maximum useful wavelength
(jmin, jmax) = find_pix_borders(sp_median, 0)
naxis1 = main_header['naxis1']
naxis2 = main_header['naxis2']
crpix1 = main_header['crpix1']
crval1 = main_header['crval1']
cdelt1 = main_header['cdelt1']
xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1
if grism_name == 'LR':
wv_parameters = set_wv_parameters(filter_name, grism_name)
wave_min = wv_parameters['wvmin_useful']
wave_max = wv_parameters['wvmax_useful'] # depends on [control=['if'], data=['grism_name']]
else:
wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1
wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1
logger.info('Setting wave_min to {}'.format(wave_min))
logger.info('Setting wave_max to {}'.format(wave_max))
# extract subset of catalogue lines within current wavelength range
lok1 = catlines_all_wave >= wave_min
lok2 = catlines_all_wave <= wave_max
catlines_reference_wave = catlines_all_wave[lok1 * lok2]
catlines_reference_flux = catlines_all_flux[lok1 * lok2]
catlines_reference_flux /= catlines_reference_flux.max()
# estimate sigma to broaden catalogue lines
csu_config = CsuConfiguration.define_from_header(main_header)
# segregate slitlets
list_useful_slitlets = csu_config.widths_in_range_mm(minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm)
# remove missing slitlets
if len(refined_rectwv_coeff.missing_slitlets) > 0:
for iremove in refined_rectwv_coeff.missing_slitlets:
if iremove in list_useful_slitlets:
list_useful_slitlets.remove(iremove) # depends on [control=['if'], data=['iremove', 'list_useful_slitlets']] # depends on [control=['for'], data=['iremove']] # depends on [control=['if'], data=[]]
list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets]
logger.info('list of useful slitlets: {}'.format(list_useful_slitlets))
logger.info('list of not useful slitlets: {}'.format(list_not_useful_slitlets))
tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet) for islitlet in list_useful_slitlets])
widths_summary = summary(tempwidths)
logger.info('Statistics of useful slitlet widths (mm):')
logger.info('- npoints....: {0:d}'.format(widths_summary['npoints']))
logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean']))
logger.info('- median.....: {0:7.3f}'.format(widths_summary['median']))
logger.info('- std........: {0:7.3f}'.format(widths_summary['std']))
logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std']))
# empirical transformation of slit width (mm) to pixels
sigma_broadening = cdelt1 * widths_summary['median']
# convolve location of catalogue lines to generate expected spectrum
(xwave_reference, sp_reference) = convolve_comb_lines(catlines_reference_wave, catlines_reference_flux, sigma_broadening, crpix1, crval1, cdelt1, naxis1)
sp_reference /= sp_reference.max()
# generate image2d with expected lines
image2d_expected_lines = np.tile(sp_reference, (naxis2, 1))
hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header)
expected_cat_image = fits.HDUList([hdu])
if abs(debugplot) % 10 != 0 or pdf is not None:
ax = ximplotxy(xwave, sp_median, 'C1-', xlabel='Wavelength (Angstroms, in vacuum)', ylabel='Normalized number of counts', title='Median spectrum', label='observed spectrum', show=False)
# overplot reference catalogue lines
ax.stem(catlines_reference_wave, catlines_reference_flux, 'C4-', markerfmt=' ', basefmt='C4-', label='tabulated lines')
# overplot convolved reference lines
ax.plot(xwave_reference, sp_reference, 'C0-', label='expected spectrum')
ax.legend()
if pdf is not None:
pdf.savefig() # depends on [control=['if'], data=['pdf']]
else:
pause_debugplot(debugplot=debugplot, pltshow=True) # depends on [control=['if'], data=[]]
# compute baseline signal in sp_median
baseline = np.percentile(sp_median[sp_median > 0], q=10)
if abs(debugplot) % 10 != 0 or pdf is not None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(sp_median, bins=1000, log=True)
ax.set_xlabel('Normalized number of counts')
ax.set_ylabel('Number of pixels')
ax.set_title('Median spectrum')
ax.axvline(float(baseline), linestyle='--', color='grey')
if pdf is not None:
pdf.savefig() # depends on [control=['if'], data=['pdf']]
else:
geometry = (0, 0, 640, 480)
set_window_geometry(geometry)
plt.show() # depends on [control=['if'], data=[]]
# subtract baseline to sp_median (only pixels with signal above zero)
lok = np.where(sp_median > 0)
sp_median[lok] -= baseline
# compute global offset through periodic correlation
logger.info('Computing global offset')
(global_offset, fpeak) = periodic_corr1d(sp_reference=sp_reference, sp_offset=sp_median, fminmax=None, naround_zero=50, plottitle='Median spectrum (cross-correlation)', pdf=pdf, debugplot=debugplot)
logger.info('Global offset: {} pixels'.format(-global_offset))
missing_slitlets = rectwv_coeff.missing_slitlets
if mode == 1:
# apply computed offset to obtain refined_rectwv_coeff_global
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet not in missing_slitlets:
i = islitlet - 1
dumdict = refined_rectwv_coeff.contents[i]
dumdict['wpoly_coeff'][0] -= global_offset * cdelt1 # depends on [control=['if'], data=['islitlet']] # depends on [control=['for'], data=['islitlet']] # depends on [control=['if'], data=[]]
elif mode == 2:
# compute individual offset for each slitlet
logger.info('Computing individual offsets')
median_55sp = median_slitlets_rectified(input_image, mode=1)
offset_array = np.zeros(EMIR_NBARS)
xplot = []
yplot = []
xplot_skipped = []
yplot_skipped = []
cout = '0'
for islitlet in range(1, EMIR_NBARS + 1):
if islitlet in list_useful_slitlets:
i = islitlet - 1
sp_median = median_55sp[0].data[i, :]
lok = np.where(sp_median > 0)
if np.any(lok):
baseline = np.percentile(sp_median[lok], q=10)
sp_median[lok] -= baseline
sp_median /= sp_median.max()
(offset_array[i], fpeak) = periodic_corr1d(sp_reference=sp_reference, sp_offset=median_55sp[0].data[i, :], fminmax=None, naround_zero=50, plottitle='slitlet #{0} (cross-correlation)'.format(islitlet), pdf=pdf, debugplot=debugplot) # depends on [control=['if'], data=[]]
else:
offset_array[i] = 0.0
dumdict = refined_rectwv_coeff.contents[i]
dumdict['wpoly_coeff'][0] -= offset_array[i] * cdelt1
xplot.append(islitlet)
yplot.append(-offset_array[i])
# second correction
wpoly_coeff_refined = check_wlcalib_sp(sp=median_55sp[0].data[i, :], crpix1=crpix1, crval1=crval1 - offset_array[i] * cdelt1, cdelt1=cdelt1, wv_master=catlines_reference_wave, coeff_ini=dumdict['wpoly_coeff'], naxis1_ini=EMIR_NAXIS1, title='slitlet #{0} (after applying offset)'.format(islitlet), ylogscale=False, pdf=pdf, debugplot=debugplot)
dumdict['wpoly_coeff'] = wpoly_coeff_refined
cout += '.' # depends on [control=['if'], data=['islitlet']]
else:
xplot_skipped.append(islitlet)
yplot_skipped.append(0)
cout += 'i'
if islitlet % 10 == 0:
if cout != 'i':
cout = str(islitlet // 10) # depends on [control=['if'], data=['cout']] # depends on [control=['if'], data=[]]
logger.info(cout) # depends on [control=['for'], data=['islitlet']]
# show offsets with opposite sign
stat_summary = summary(np.array(yplot))
logger.info('Statistics of individual slitlet offsets (pixels):')
logger.info('- npoints....: {0:d}'.format(stat_summary['npoints']))
logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean']))
logger.info('- median.....: {0:7.3f}'.format(stat_summary['median']))
logger.info('- std........: {0:7.3f}'.format(stat_summary['std']))
logger.info('- robust_std.: {0:7.3f}'.format(stat_summary['robust_std']))
if abs(debugplot) % 10 != 0 or pdf is not None:
ax = ximplotxy(xplot, yplot, linestyle='', marker='o', color='C0', xlabel='slitlet number', ylabel='-offset (pixels) = offset to be applied', title='cross-correlation result', show=False, **{'label': 'individual slitlets'})
if len(xplot_skipped) > 0:
ax.plot(xplot_skipped, yplot_skipped, 'mx') # depends on [control=['if'], data=[]]
ax.axhline(-global_offset, linestyle='--', color='C1', label='global offset')
ax.legend()
if pdf is not None:
pdf.savefig() # depends on [control=['if'], data=['pdf']]
else:
pause_debugplot(debugplot=debugplot, pltshow=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('Unexpected mode={}'.format(mode))
# close output PDF file
if pdf is not None:
pdf.close() # depends on [control=['if'], data=['pdf']]
# return result
return (refined_rectwv_coeff, expected_cat_image) |
def join(self, queue_name, min_successes=10, idle_time=100, *, timeout=None):
"""Wait for all the messages on the given queue to be
processed. This method is only meant to be used in tests to
wait for all the messages in a queue to be processed.
Warning:
This method doesn't wait for unacked messages so it may not
be completely reliable. Use the stub broker in your unit
tests and only use this for simple integration tests.
Parameters:
queue_name(str): The queue to wait on.
min_successes(int): The minimum number of times all the
polled queues should be empty.
idle_time(int): The number of milliseconds to wait between
counts.
timeout(Optional[int]): The max amount of time, in
milliseconds, to wait on this queue.
"""
deadline = timeout and time.monotonic() + timeout / 1000
successes = 0
while successes < min_successes:
if deadline and time.monotonic() >= deadline:
raise QueueJoinTimeout(queue_name)
total_messages = sum(self.get_queue_message_counts(queue_name)[:-1])
if total_messages == 0:
successes += 1
else:
successes = 0
self.connection.sleep(idle_time / 1000) | def function[join, parameter[self, queue_name, min_successes, idle_time]]:
constant[Wait for all the messages on the given queue to be
processed. This method is only meant to be used in tests to
wait for all the messages in a queue to be processed.
Warning:
This method doesn't wait for unacked messages so it may not
be completely reliable. Use the stub broker in your unit
tests and only use this for simple integration tests.
Parameters:
queue_name(str): The queue to wait on.
min_successes(int): The minimum number of times all the
polled queues should be empty.
idle_time(int): The number of milliseconds to wait between
counts.
timeout(Optional[int]): The max amount of time, in
milliseconds, to wait on this queue.
]
variable[deadline] assign[=] <ast.BoolOp object at 0x7da1b1837a60>
variable[successes] assign[=] constant[0]
while compare[name[successes] less[<] name[min_successes]] begin[:]
if <ast.BoolOp object at 0x7da1b1837c10> begin[:]
<ast.Raise object at 0x7da1b18371f0>
variable[total_messages] assign[=] call[name[sum], parameter[call[call[name[self].get_queue_message_counts, parameter[name[queue_name]]]][<ast.Slice object at 0x7da1b1837bb0>]]]
if compare[name[total_messages] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b163b6d0>
call[name[self].connection.sleep, parameter[binary_operation[name[idle_time] / constant[1000]]]] | keyword[def] identifier[join] ( identifier[self] , identifier[queue_name] , identifier[min_successes] = literal[int] , identifier[idle_time] = literal[int] ,*, identifier[timeout] = keyword[None] ):
literal[string]
identifier[deadline] = identifier[timeout] keyword[and] identifier[time] . identifier[monotonic] ()+ identifier[timeout] / literal[int]
identifier[successes] = literal[int]
keyword[while] identifier[successes] < identifier[min_successes] :
keyword[if] identifier[deadline] keyword[and] identifier[time] . identifier[monotonic] ()>= identifier[deadline] :
keyword[raise] identifier[QueueJoinTimeout] ( identifier[queue_name] )
identifier[total_messages] = identifier[sum] ( identifier[self] . identifier[get_queue_message_counts] ( identifier[queue_name] )[:- literal[int] ])
keyword[if] identifier[total_messages] == literal[int] :
identifier[successes] += literal[int]
keyword[else] :
identifier[successes] = literal[int]
identifier[self] . identifier[connection] . identifier[sleep] ( identifier[idle_time] / literal[int] ) | def join(self, queue_name, min_successes=10, idle_time=100, *, timeout=None):
"""Wait for all the messages on the given queue to be
processed. This method is only meant to be used in tests to
wait for all the messages in a queue to be processed.
Warning:
This method doesn't wait for unacked messages so it may not
be completely reliable. Use the stub broker in your unit
tests and only use this for simple integration tests.
Parameters:
queue_name(str): The queue to wait on.
min_successes(int): The minimum number of times all the
polled queues should be empty.
idle_time(int): The number of milliseconds to wait between
counts.
timeout(Optional[int]): The max amount of time, in
milliseconds, to wait on this queue.
"""
deadline = timeout and time.monotonic() + timeout / 1000
successes = 0
while successes < min_successes:
if deadline and time.monotonic() >= deadline:
raise QueueJoinTimeout(queue_name) # depends on [control=['if'], data=[]]
total_messages = sum(self.get_queue_message_counts(queue_name)[:-1])
if total_messages == 0:
successes += 1 # depends on [control=['if'], data=[]]
else:
successes = 0
self.connection.sleep(idle_time / 1000) # depends on [control=['while'], data=['successes']] |
def removeRequest(self, service, *args):
"""
Removes a request from the pending request list.
"""
if isinstance(service, RequestWrapper):
if self.logger:
self.logger.debug('Removing request: %s',
self.requests[self.requests.index(service)])
del self.requests[self.requests.index(service)]
return
for request in self.requests:
if request.service == service and request.args == args:
if self.logger:
self.logger.debug('Removing request: %s',
self.requests[self.requests.index(request)])
del self.requests[self.requests.index(request)]
return
raise LookupError("Request not found") | def function[removeRequest, parameter[self, service]]:
constant[
Removes a request from the pending request list.
]
if call[name[isinstance], parameter[name[service], name[RequestWrapper]]] begin[:]
if name[self].logger begin[:]
call[name[self].logger.debug, parameter[constant[Removing request: %s], call[name[self].requests][call[name[self].requests.index, parameter[name[service]]]]]]
<ast.Delete object at 0x7da1b143fa00>
return[None]
for taget[name[request]] in starred[name[self].requests] begin[:]
if <ast.BoolOp object at 0x7da20c992260> begin[:]
if name[self].logger begin[:]
call[name[self].logger.debug, parameter[constant[Removing request: %s], call[name[self].requests][call[name[self].requests.index, parameter[name[request]]]]]]
<ast.Delete object at 0x7da18f813b50>
return[None]
<ast.Raise object at 0x7da1b14347f0> | keyword[def] identifier[removeRequest] ( identifier[self] , identifier[service] ,* identifier[args] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[service] , identifier[RequestWrapper] ):
keyword[if] identifier[self] . identifier[logger] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[requests] [ identifier[self] . identifier[requests] . identifier[index] ( identifier[service] )])
keyword[del] identifier[self] . identifier[requests] [ identifier[self] . identifier[requests] . identifier[index] ( identifier[service] )]
keyword[return]
keyword[for] identifier[request] keyword[in] identifier[self] . identifier[requests] :
keyword[if] identifier[request] . identifier[service] == identifier[service] keyword[and] identifier[request] . identifier[args] == identifier[args] :
keyword[if] identifier[self] . identifier[logger] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[requests] [ identifier[self] . identifier[requests] . identifier[index] ( identifier[request] )])
keyword[del] identifier[self] . identifier[requests] [ identifier[self] . identifier[requests] . identifier[index] ( identifier[request] )]
keyword[return]
keyword[raise] identifier[LookupError] ( literal[string] ) | def removeRequest(self, service, *args):
"""
Removes a request from the pending request list.
"""
if isinstance(service, RequestWrapper):
if self.logger:
self.logger.debug('Removing request: %s', self.requests[self.requests.index(service)]) # depends on [control=['if'], data=[]]
del self.requests[self.requests.index(service)]
return # depends on [control=['if'], data=[]]
for request in self.requests:
if request.service == service and request.args == args:
if self.logger:
self.logger.debug('Removing request: %s', self.requests[self.requests.index(request)]) # depends on [control=['if'], data=[]]
del self.requests[self.requests.index(request)]
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['request']]
raise LookupError('Request not found') |
def memory_dump():
"""
References:
http://stackoverflow.com/questions/141351/how-do-i-find-what-is-using-memory-in-a-python-process-in-a-production-system
"""
import cPickle
dump = open("memory.pickle", 'w')
for obj in gc.get_objects():
i = id(obj)
size = sys.getsizeof(obj, 0)
# referrers = [id(o) for o in gc.get_referrers(obj) if hasattr(o, '__class__')]
referents = [id(o) for o in gc.get_referents(obj) if hasattr(o, '__class__')]
if hasattr(obj, '__class__'):
cls = str(obj.__class__)
cPickle.dump({'id': i, 'class': cls, 'size': size, 'referents': referents}, dump) | def function[memory_dump, parameter[]]:
constant[
References:
http://stackoverflow.com/questions/141351/how-do-i-find-what-is-using-memory-in-a-python-process-in-a-production-system
]
import module[cPickle]
variable[dump] assign[=] call[name[open], parameter[constant[memory.pickle], constant[w]]]
for taget[name[obj]] in starred[call[name[gc].get_objects, parameter[]]] begin[:]
variable[i] assign[=] call[name[id], parameter[name[obj]]]
variable[size] assign[=] call[name[sys].getsizeof, parameter[name[obj], constant[0]]]
variable[referents] assign[=] <ast.ListComp object at 0x7da1b24aec50>
if call[name[hasattr], parameter[name[obj], constant[__class__]]] begin[:]
variable[cls] assign[=] call[name[str], parameter[name[obj].__class__]]
call[name[cPickle].dump, parameter[dictionary[[<ast.Constant object at 0x7da1b24ac6a0>, <ast.Constant object at 0x7da1b24acb20>, <ast.Constant object at 0x7da1b24aca30>, <ast.Constant object at 0x7da1b24ad1b0>], [<ast.Name object at 0x7da1b24acd90>, <ast.Name object at 0x7da1b24af7c0>, <ast.Name object at 0x7da1b24adc00>, <ast.Name object at 0x7da1b24ac160>]], name[dump]]] | keyword[def] identifier[memory_dump] ():
literal[string]
keyword[import] identifier[cPickle]
identifier[dump] = identifier[open] ( literal[string] , literal[string] )
keyword[for] identifier[obj] keyword[in] identifier[gc] . identifier[get_objects] ():
identifier[i] = identifier[id] ( identifier[obj] )
identifier[size] = identifier[sys] . identifier[getsizeof] ( identifier[obj] , literal[int] )
identifier[referents] =[ identifier[id] ( identifier[o] ) keyword[for] identifier[o] keyword[in] identifier[gc] . identifier[get_referents] ( identifier[obj] ) keyword[if] identifier[hasattr] ( identifier[o] , literal[string] )]
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[cls] = identifier[str] ( identifier[obj] . identifier[__class__] )
identifier[cPickle] . identifier[dump] ({ literal[string] : identifier[i] , literal[string] : identifier[cls] , literal[string] : identifier[size] , literal[string] : identifier[referents] }, identifier[dump] ) | def memory_dump():
"""
References:
http://stackoverflow.com/questions/141351/how-do-i-find-what-is-using-memory-in-a-python-process-in-a-production-system
"""
import cPickle
dump = open('memory.pickle', 'w')
for obj in gc.get_objects():
i = id(obj)
size = sys.getsizeof(obj, 0)
# referrers = [id(o) for o in gc.get_referrers(obj) if hasattr(o, '__class__')]
referents = [id(o) for o in gc.get_referents(obj) if hasattr(o, '__class__')]
if hasattr(obj, '__class__'):
cls = str(obj.__class__)
cPickle.dump({'id': i, 'class': cls, 'size': size, 'referents': referents}, dump) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']] |
def pluck(self, value, key=None):
"""
Get a list with the values of a given key.
:rtype: Collection
"""
if key:
return dict(map(lambda x: (data_get(x, key), data_get(x, value)), self.items))
else:
results = list(map(lambda x: data_get(x, value), self.items))
return self.__class__(results) | def function[pluck, parameter[self, value, key]]:
constant[
Get a list with the values of a given key.
:rtype: Collection
]
if name[key] begin[:]
return[call[name[dict], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b054b7f0>, name[self].items]]]]]
return[call[name[self].__class__, parameter[name[results]]]] | keyword[def] identifier[pluck] ( identifier[self] , identifier[value] , identifier[key] = keyword[None] ):
literal[string]
keyword[if] identifier[key] :
keyword[return] identifier[dict] ( identifier[map] ( keyword[lambda] identifier[x] :( identifier[data_get] ( identifier[x] , identifier[key] ), identifier[data_get] ( identifier[x] , identifier[value] )), identifier[self] . identifier[items] ))
keyword[else] :
identifier[results] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[data_get] ( identifier[x] , identifier[value] ), identifier[self] . identifier[items] ))
keyword[return] identifier[self] . identifier[__class__] ( identifier[results] ) | def pluck(self, value, key=None):
"""
Get a list with the values of a given key.
:rtype: Collection
"""
if key:
return dict(map(lambda x: (data_get(x, key), data_get(x, value)), self.items)) # depends on [control=['if'], data=[]]
else:
results = list(map(lambda x: data_get(x, value), self.items))
return self.__class__(results) |
def environment_variables_for_task(task):
"""
This will build a dict with all the environment variables that
should be present when running a build or deployment.
:param task: A dict of the json payload with information about
the build task.
:return: A dict of environment variables.
"""
env = {
'CI': 'frigg',
'FRIGG': 'true',
'FRIGG_CI': 'true',
'GH_TOKEN': task['gh_token'],
'FRIGG_BUILD_BRANCH': task['branch'],
'FRIGG_BUILD_COMMIT_HASH': task['sha'],
'FRIGG_BUILD_DIR': '~/builds/{0}'.format(task['id']),
'FRIGG_BUILD_ID': task['id'],
'FRIGG_DOCKER_IMAGE': task['image'],
'FRIGG_WORKER': socket.getfqdn(),
}
if 'pull_request_id' in task:
env['FRIGG_PULL_REQUEST_ID'] = task['pull_request_id']
if 'build_number' in task:
env['FRIGG_BUILD_NUMBER'] = task['build_number']
if 'secrets' in task:
env.update(task['secrets'])
if 'environment_variables' in task:
env.update(task['environment_variables'])
return env | def function[environment_variables_for_task, parameter[task]]:
constant[
This will build a dict with all the environment variables that
should be present when running a build or deployment.
:param task: A dict of the json payload with information about
the build task.
:return: A dict of environment variables.
]
variable[env] assign[=] dictionary[[<ast.Constant object at 0x7da18eb56890>, <ast.Constant object at 0x7da18eb55ae0>, <ast.Constant object at 0x7da18eb56a40>, <ast.Constant object at 0x7da18eb56ce0>, <ast.Constant object at 0x7da18eb559c0>, <ast.Constant object at 0x7da18eb56dd0>, <ast.Constant object at 0x7da18eb57310>, <ast.Constant object at 0x7da18eb55450>, <ast.Constant object at 0x7da18eb54f70>, <ast.Constant object at 0x7da18eb57040>], [<ast.Constant object at 0x7da18eb54220>, <ast.Constant object at 0x7da18eb54520>, <ast.Constant object at 0x7da18eb56fb0>, <ast.Subscript object at 0x7da18eb56d10>, <ast.Subscript object at 0x7da18eb55090>, <ast.Subscript object at 0x7da18eb55600>, <ast.Call object at 0x7da18eb558d0>, <ast.Subscript object at 0x7da18eb541c0>, <ast.Subscript object at 0x7da18eb54a60>, <ast.Call object at 0x7da18eb55b40>]]
if compare[constant[pull_request_id] in name[task]] begin[:]
call[name[env]][constant[FRIGG_PULL_REQUEST_ID]] assign[=] call[name[task]][constant[pull_request_id]]
if compare[constant[build_number] in name[task]] begin[:]
call[name[env]][constant[FRIGG_BUILD_NUMBER]] assign[=] call[name[task]][constant[build_number]]
if compare[constant[secrets] in name[task]] begin[:]
call[name[env].update, parameter[call[name[task]][constant[secrets]]]]
if compare[constant[environment_variables] in name[task]] begin[:]
call[name[env].update, parameter[call[name[task]][constant[environment_variables]]]]
return[name[env]] | keyword[def] identifier[environment_variables_for_task] ( identifier[task] ):
literal[string]
identifier[env] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[task] [ literal[string] ],
literal[string] : identifier[task] [ literal[string] ],
literal[string] : identifier[task] [ literal[string] ],
literal[string] : literal[string] . identifier[format] ( identifier[task] [ literal[string] ]),
literal[string] : identifier[task] [ literal[string] ],
literal[string] : identifier[task] [ literal[string] ],
literal[string] : identifier[socket] . identifier[getfqdn] (),
}
keyword[if] literal[string] keyword[in] identifier[task] :
identifier[env] [ literal[string] ]= identifier[task] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[task] :
identifier[env] [ literal[string] ]= identifier[task] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[task] :
identifier[env] . identifier[update] ( identifier[task] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[task] :
identifier[env] . identifier[update] ( identifier[task] [ literal[string] ])
keyword[return] identifier[env] | def environment_variables_for_task(task):
"""
This will build a dict with all the environment variables that
should be present when running a build or deployment.
:param task: A dict of the json payload with information about
the build task.
:return: A dict of environment variables.
"""
env = {'CI': 'frigg', 'FRIGG': 'true', 'FRIGG_CI': 'true', 'GH_TOKEN': task['gh_token'], 'FRIGG_BUILD_BRANCH': task['branch'], 'FRIGG_BUILD_COMMIT_HASH': task['sha'], 'FRIGG_BUILD_DIR': '~/builds/{0}'.format(task['id']), 'FRIGG_BUILD_ID': task['id'], 'FRIGG_DOCKER_IMAGE': task['image'], 'FRIGG_WORKER': socket.getfqdn()}
if 'pull_request_id' in task:
env['FRIGG_PULL_REQUEST_ID'] = task['pull_request_id'] # depends on [control=['if'], data=['task']]
if 'build_number' in task:
env['FRIGG_BUILD_NUMBER'] = task['build_number'] # depends on [control=['if'], data=['task']]
if 'secrets' in task:
env.update(task['secrets']) # depends on [control=['if'], data=['task']]
if 'environment_variables' in task:
env.update(task['environment_variables']) # depends on [control=['if'], data=['task']]
return env |
def plot_job_history(jobs, interval='year'):
"""Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQjob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance.
"""
def get_date(job):
"""Returns a datetime object from a IBMQJob instance.
Args:
job (IBMQJob): A job.
Returns:
dt: A datetime object.
"""
return datetime.datetime.strptime(job.creation_date(),
'%Y-%m-%dT%H:%M:%S.%fZ')
current_time = datetime.datetime.now()
if interval == 'year':
bins = [(current_time - datetime.timedelta(days=k*365/12))
for k in range(12)]
elif interval == 'month':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]
elif interval == 'week':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]
binned_jobs = [0]*len(bins)
if interval == 'year':
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
else:
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.day == dat.day and date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
nz_bins = []
nz_idx = []
for ind, val in enumerate(binned_jobs):
if val != 0:
nz_idx.append(ind)
nz_bins.append(val)
total_jobs = sum(binned_jobs)
colors = ['#003f5c', '#ffa600', '#374c80', '#ff764a',
'#7a5195', '#ef5675', '#bc5090']
if interval == 'year':
labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx]
else:
labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]
fig, ax = plt.subplots(1, 1, figsize=(5, 5)) # pylint: disable=invalid-name
ax.pie(nz_bins[::-1], labels=labels, colors=colors, textprops={'fontsize': 14},
rotatelabels=True, counterclock=False)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, total_jobs, horizontalalignment='center',
verticalalignment='center', fontsize=26)
fig.tight_layout()
return fig | def function[plot_job_history, parameter[jobs, interval]]:
constant[Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQjob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance.
]
def function[get_date, parameter[job]]:
constant[Returns a datetime object from a IBMQJob instance.
Args:
job (IBMQJob): A job.
Returns:
dt: A datetime object.
]
return[call[name[datetime].datetime.strptime, parameter[call[name[job].creation_date, parameter[]], constant[%Y-%m-%dT%H:%M:%S.%fZ]]]]
variable[current_time] assign[=] call[name[datetime].datetime.now, parameter[]]
if compare[name[interval] equal[==] constant[year]] begin[:]
variable[bins] assign[=] <ast.ListComp object at 0x7da1b03175b0>
variable[binned_jobs] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0314d90>]] * call[name[len], parameter[name[bins]]]]
if compare[name[interval] equal[==] constant[year]] begin[:]
for taget[name[job]] in starred[name[jobs]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0315000>, <ast.Name object at 0x7da1b0315030>]]] in starred[call[name[enumerate], parameter[name[bins]]]] begin[:]
variable[date] assign[=] call[name[get_date], parameter[name[job]]]
if compare[name[date].month equal[==] name[dat].month] begin[:]
<ast.AugAssign object at 0x7da1b0315300>
break
variable[nz_bins] assign[=] list[[]]
variable[nz_idx] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0315cf0>, <ast.Name object at 0x7da1b0315d20>]]] in starred[call[name[enumerate], parameter[name[binned_jobs]]]] begin[:]
if compare[name[val] not_equal[!=] constant[0]] begin[:]
call[name[nz_idx].append, parameter[name[ind]]]
call[name[nz_bins].append, parameter[name[val]]]
variable[total_jobs] assign[=] call[name[sum], parameter[name[binned_jobs]]]
variable[colors] assign[=] list[[<ast.Constant object at 0x7da1b03165f0>, <ast.Constant object at 0x7da1b0316620>, <ast.Constant object at 0x7da1b0316650>, <ast.Constant object at 0x7da1b0316680>, <ast.Constant object at 0x7da1b03166b0>, <ast.Constant object at 0x7da1b03166e0>, <ast.Constant object at 0x7da1b0316710>]]
if compare[name[interval] equal[==] constant[year]] begin[:]
variable[labels] assign[=] <ast.ListComp object at 0x7da1b0316860>
<ast.Tuple object at 0x7da1b05f80d0> assign[=] call[name[plt].subplots, parameter[constant[1], constant[1]]]
call[name[ax].pie, parameter[call[name[nz_bins]][<ast.Slice object at 0x7da1b05fac20>]]]
call[name[ax].add_artist, parameter[call[name[Circle], parameter[tuple[[<ast.Constant object at 0x7da1b0396650>, <ast.Constant object at 0x7da1b0396680>]], constant[0.7]]]]]
call[name[ax].text, parameter[constant[0], constant[0], name[total_jobs]]]
call[name[fig].tight_layout, parameter[]]
return[name[fig]] | keyword[def] identifier[plot_job_history] ( identifier[jobs] , identifier[interval] = literal[string] ):
literal[string]
keyword[def] identifier[get_date] ( identifier[job] ):
literal[string]
keyword[return] identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[job] . identifier[creation_date] (),
literal[string] )
identifier[current_time] = identifier[datetime] . identifier[datetime] . identifier[now] ()
keyword[if] identifier[interval] == literal[string] :
identifier[bins] =[( identifier[current_time] - identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[k] * literal[int] / literal[int] ))
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] )]
keyword[elif] identifier[interval] == literal[string] :
identifier[bins] =[( identifier[current_time] - identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] )]
keyword[elif] identifier[interval] == literal[string] :
identifier[bins] =[( identifier[current_time] - identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] )]
identifier[binned_jobs] =[ literal[int] ]* identifier[len] ( identifier[bins] )
keyword[if] identifier[interval] == literal[string] :
keyword[for] identifier[job] keyword[in] identifier[jobs] :
keyword[for] identifier[ind] , identifier[dat] keyword[in] identifier[enumerate] ( identifier[bins] ):
identifier[date] = identifier[get_date] ( identifier[job] )
keyword[if] identifier[date] . identifier[month] == identifier[dat] . identifier[month] :
identifier[binned_jobs] [ identifier[ind] ]+= literal[int]
keyword[break]
keyword[else] :
keyword[continue]
keyword[else] :
keyword[for] identifier[job] keyword[in] identifier[jobs] :
keyword[for] identifier[ind] , identifier[dat] keyword[in] identifier[enumerate] ( identifier[bins] ):
identifier[date] = identifier[get_date] ( identifier[job] )
keyword[if] identifier[date] . identifier[day] == identifier[dat] . identifier[day] keyword[and] identifier[date] . identifier[month] == identifier[dat] . identifier[month] :
identifier[binned_jobs] [ identifier[ind] ]+= literal[int]
keyword[break]
keyword[else] :
keyword[continue]
identifier[nz_bins] =[]
identifier[nz_idx] =[]
keyword[for] identifier[ind] , identifier[val] keyword[in] identifier[enumerate] ( identifier[binned_jobs] ):
keyword[if] identifier[val] != literal[int] :
identifier[nz_idx] . identifier[append] ( identifier[ind] )
identifier[nz_bins] . identifier[append] ( identifier[val] )
identifier[total_jobs] = identifier[sum] ( identifier[binned_jobs] )
identifier[colors] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[interval] == literal[string] :
identifier[labels] =[ literal[string] . identifier[format] ( identifier[str] ( identifier[bins] [ identifier[b] ]. identifier[year] )[ literal[int] :], identifier[bins] [ identifier[b] ]. identifier[month] ) keyword[for] identifier[b] keyword[in] identifier[nz_idx] ]
keyword[else] :
identifier[labels] =[ literal[string] . identifier[format] ( identifier[bins] [ identifier[b] ]. identifier[month] , identifier[bins] [ identifier[b] ]. identifier[day] ) keyword[for] identifier[b] keyword[in] identifier[nz_idx] ]
identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] ( literal[int] , literal[int] , identifier[figsize] =( literal[int] , literal[int] ))
identifier[ax] . identifier[pie] ( identifier[nz_bins] [::- literal[int] ], identifier[labels] = identifier[labels] , identifier[colors] = identifier[colors] , identifier[textprops] ={ literal[string] : literal[int] },
identifier[rotatelabels] = keyword[True] , identifier[counterclock] = keyword[False] )
identifier[ax] . identifier[add_artist] ( identifier[Circle] (( literal[int] , literal[int] ), literal[int] , identifier[color] = literal[string] , identifier[zorder] = literal[int] ))
identifier[ax] . identifier[text] ( literal[int] , literal[int] , identifier[total_jobs] , identifier[horizontalalignment] = literal[string] ,
identifier[verticalalignment] = literal[string] , identifier[fontsize] = literal[int] )
identifier[fig] . identifier[tight_layout] ()
keyword[return] identifier[fig] | def plot_job_history(jobs, interval='year'):
"""Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQjob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance.
"""
def get_date(job):
"""Returns a datetime object from a IBMQJob instance.
Args:
job (IBMQJob): A job.
Returns:
dt: A datetime object.
"""
return datetime.datetime.strptime(job.creation_date(), '%Y-%m-%dT%H:%M:%S.%fZ')
current_time = datetime.datetime.now()
if interval == 'year':
bins = [current_time - datetime.timedelta(days=k * 365 / 12) for k in range(12)] # depends on [control=['if'], data=[]]
elif interval == 'month':
bins = [current_time - datetime.timedelta(days=k) for k in range(30)] # depends on [control=['if'], data=[]]
elif interval == 'week':
bins = [current_time - datetime.timedelta(days=k) for k in range(7)] # depends on [control=['if'], data=[]]
binned_jobs = [0] * len(bins)
if interval == 'year':
for job in jobs:
for (ind, dat) in enumerate(bins):
date = get_date(job)
if date.month == dat.month:
binned_jobs[ind] += 1
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
continue # depends on [control=['for'], data=['job']] # depends on [control=['if'], data=[]]
else:
for job in jobs:
for (ind, dat) in enumerate(bins):
date = get_date(job)
if date.day == dat.day and date.month == dat.month:
binned_jobs[ind] += 1
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
else:
continue # depends on [control=['for'], data=['job']]
nz_bins = []
nz_idx = []
for (ind, val) in enumerate(binned_jobs):
if val != 0:
nz_idx.append(ind)
nz_bins.append(val) # depends on [control=['if'], data=['val']] # depends on [control=['for'], data=[]]
total_jobs = sum(binned_jobs)
colors = ['#003f5c', '#ffa600', '#374c80', '#ff764a', '#7a5195', '#ef5675', '#bc5090']
if interval == 'year':
labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx] # depends on [control=['if'], data=[]]
else:
labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]
(fig, ax) = plt.subplots(1, 1, figsize=(5, 5)) # pylint: disable=invalid-name
ax.pie(nz_bins[::-1], labels=labels, colors=colors, textprops={'fontsize': 14}, rotatelabels=True, counterclock=False)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, total_jobs, horizontalalignment='center', verticalalignment='center', fontsize=26)
fig.tight_layout()
return fig |
def to_file(self, slug, folderpath=None, header=None, footer=None):
"""
Writes the html report to a file from the report stack
"""
if folderpath is None:
if self.report_path is None:
self.err(
"Please set the report_path parameter or pass a path in arguments")
return
folderpath = self.report_path
else:
self.report_path = folderpath
html = self._get_header(header)
if html is None or html == "":
self.err(self.to_file, "Can not get html header")
for report in self.reports:
if "html" not in report:
self.err("No html for report " + report)
self.reports = self.report_engines = []
return
html += report["html"]
html += self._get_footer(footer)
try:
path = self._write_file(slug, folderpath, html)
path = "file://" + path
except Exception as e:
self.err(e, self.to_file, "Can not save report to file")
return
self.reports = []
self.report_engines = []
if self.notebook is True:
link = '<a href="' + path + '" target="_blank">' + path + '</a>'
return display(HTML(link)) | def function[to_file, parameter[self, slug, folderpath, header, footer]]:
constant[
Writes the html report to a file from the report stack
]
if compare[name[folderpath] is constant[None]] begin[:]
if compare[name[self].report_path is constant[None]] begin[:]
call[name[self].err, parameter[constant[Please set the report_path parameter or pass a path in arguments]]]
return[None]
variable[folderpath] assign[=] name[self].report_path
variable[html] assign[=] call[name[self]._get_header, parameter[name[header]]]
if <ast.BoolOp object at 0x7da204623130> begin[:]
call[name[self].err, parameter[name[self].to_file, constant[Can not get html header]]]
for taget[name[report]] in starred[name[self].reports] begin[:]
if compare[constant[html] <ast.NotIn object at 0x7da2590d7190> name[report]] begin[:]
call[name[self].err, parameter[binary_operation[constant[No html for report ] + name[report]]]]
name[self].reports assign[=] list[[]]
return[None]
<ast.AugAssign object at 0x7da204620d00>
<ast.AugAssign object at 0x7da204620340>
<ast.Try object at 0x7da204622170>
name[self].reports assign[=] list[[]]
name[self].report_engines assign[=] list[[]]
if compare[name[self].notebook is constant[True]] begin[:]
variable[link] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[<a href="] + name[path]] + constant[" target="_blank">]] + name[path]] + constant[</a>]]
return[call[name[display], parameter[call[name[HTML], parameter[name[link]]]]]] | keyword[def] identifier[to_file] ( identifier[self] , identifier[slug] , identifier[folderpath] = keyword[None] , identifier[header] = keyword[None] , identifier[footer] = keyword[None] ):
literal[string]
keyword[if] identifier[folderpath] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[report_path] keyword[is] keyword[None] :
identifier[self] . identifier[err] (
literal[string] )
keyword[return]
identifier[folderpath] = identifier[self] . identifier[report_path]
keyword[else] :
identifier[self] . identifier[report_path] = identifier[folderpath]
identifier[html] = identifier[self] . identifier[_get_header] ( identifier[header] )
keyword[if] identifier[html] keyword[is] keyword[None] keyword[or] identifier[html] == literal[string] :
identifier[self] . identifier[err] ( identifier[self] . identifier[to_file] , literal[string] )
keyword[for] identifier[report] keyword[in] identifier[self] . identifier[reports] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[report] :
identifier[self] . identifier[err] ( literal[string] + identifier[report] )
identifier[self] . identifier[reports] = identifier[self] . identifier[report_engines] =[]
keyword[return]
identifier[html] += identifier[report] [ literal[string] ]
identifier[html] += identifier[self] . identifier[_get_footer] ( identifier[footer] )
keyword[try] :
identifier[path] = identifier[self] . identifier[_write_file] ( identifier[slug] , identifier[folderpath] , identifier[html] )
identifier[path] = literal[string] + identifier[path]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , identifier[self] . identifier[to_file] , literal[string] )
keyword[return]
identifier[self] . identifier[reports] =[]
identifier[self] . identifier[report_engines] =[]
keyword[if] identifier[self] . identifier[notebook] keyword[is] keyword[True] :
identifier[link] = literal[string] + identifier[path] + literal[string] + identifier[path] + literal[string]
keyword[return] identifier[display] ( identifier[HTML] ( identifier[link] )) | def to_file(self, slug, folderpath=None, header=None, footer=None):
"""
Writes the html report to a file from the report stack
"""
if folderpath is None:
if self.report_path is None:
self.err('Please set the report_path parameter or pass a path in arguments')
return # depends on [control=['if'], data=[]]
folderpath = self.report_path # depends on [control=['if'], data=['folderpath']]
else:
self.report_path = folderpath
html = self._get_header(header)
if html is None or html == '':
self.err(self.to_file, 'Can not get html header') # depends on [control=['if'], data=[]]
for report in self.reports:
if 'html' not in report:
self.err('No html for report ' + report)
self.reports = self.report_engines = []
return # depends on [control=['if'], data=['report']]
html += report['html'] # depends on [control=['for'], data=['report']]
html += self._get_footer(footer)
try:
path = self._write_file(slug, folderpath, html)
path = 'file://' + path # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, self.to_file, 'Can not save report to file')
return # depends on [control=['except'], data=['e']]
self.reports = []
self.report_engines = []
if self.notebook is True:
link = '<a href="' + path + '" target="_blank">' + path + '</a>'
return display(HTML(link)) # depends on [control=['if'], data=[]] |
def get_class_properties(self, dev_class, class_prop):
"""
get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None"""
# initialize default values
if class_prop == {} or not Util._UseDb:
return
# call database to get properties
props = self.db.get_class_property(dev_class.get_name(), list(class_prop.keys()))
# if value defined in database, store it
for name in class_prop:
if props[name]:
type = self.get_property_type(name, class_prop)
values = self.stringArray2values(props[name], type)
self.set_property_values(name, class_prop, values)
else:
print(name + " property NOT found in database") | def function[get_class_properties, parameter[self, dev_class, class_prop]]:
constant[
get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None]
if <ast.BoolOp object at 0x7da18dc99ff0> begin[:]
return[None]
variable[props] assign[=] call[name[self].db.get_class_property, parameter[call[name[dev_class].get_name, parameter[]], call[name[list], parameter[call[name[class_prop].keys, parameter[]]]]]]
for taget[name[name]] in starred[name[class_prop]] begin[:]
if call[name[props]][name[name]] begin[:]
variable[type] assign[=] call[name[self].get_property_type, parameter[name[name], name[class_prop]]]
variable[values] assign[=] call[name[self].stringArray2values, parameter[call[name[props]][name[name]], name[type]]]
call[name[self].set_property_values, parameter[name[name], name[class_prop], name[values]]] | keyword[def] identifier[get_class_properties] ( identifier[self] , identifier[dev_class] , identifier[class_prop] ):
literal[string]
keyword[if] identifier[class_prop] =={} keyword[or] keyword[not] identifier[Util] . identifier[_UseDb] :
keyword[return]
identifier[props] = identifier[self] . identifier[db] . identifier[get_class_property] ( identifier[dev_class] . identifier[get_name] (), identifier[list] ( identifier[class_prop] . identifier[keys] ()))
keyword[for] identifier[name] keyword[in] identifier[class_prop] :
keyword[if] identifier[props] [ identifier[name] ]:
identifier[type] = identifier[self] . identifier[get_property_type] ( identifier[name] , identifier[class_prop] )
identifier[values] = identifier[self] . identifier[stringArray2values] ( identifier[props] [ identifier[name] ], identifier[type] )
identifier[self] . identifier[set_property_values] ( identifier[name] , identifier[class_prop] , identifier[values] )
keyword[else] :
identifier[print] ( identifier[name] + literal[string] ) | def get_class_properties(self, dev_class, class_prop):
"""
get_class_properties(self, dev_class, class_prop) -> None
Returns the class properties
Parameters :
- dev_class : (DeviceClass) the DeviceClass object
- class_prop : [in, out] (dict<str, None>) the property names. Will be filled
with property values
Return : None"""
# initialize default values
if class_prop == {} or not Util._UseDb:
return # depends on [control=['if'], data=[]]
# call database to get properties
props = self.db.get_class_property(dev_class.get_name(), list(class_prop.keys()))
# if value defined in database, store it
for name in class_prop:
if props[name]:
type = self.get_property_type(name, class_prop)
values = self.stringArray2values(props[name], type)
self.set_property_values(name, class_prop, values) # depends on [control=['if'], data=[]]
else:
print(name + ' property NOT found in database') # depends on [control=['for'], data=['name']] |
def getShocks(self):
'''
Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but
only consumers who update their macroeconomic beliefs this period incorporate all pre-
viously unnoticed aggregate permanent shocks. Agents correctly observe the level of all
real variables (market resources, consumption, assets, etc), but misperceive the aggregate
productivity level.
Parameters
----------
None
Returns
-------
None
'''
# The strange syntax here is so that both StickyEconsumerType and StickyEmarkovConsumerType
# run the getShocks method of their first superclass: AggShockConsumerType and
# AggShockMarkovConsumerType respectively. This will be simplified in Python 3.
super(self.__class__,self).getShocks() # Get permanent and transitory combined shocks
newborns = self.t_age == 0
self.TranShkNow[newborns] = self.TranShkAggNow*self.wRteNow # Turn off idiosyncratic shocks for newborns
self.PermShkNow[newborns] = self.PermShkAggNow
self.getUpdaters() # Randomly draw which agents will update their beliefs
# Calculate innovation to the productivity level perception error
pLvlErrNew = self.getpLvlError()
self.pLvlErrNow *= pLvlErrNew # Perception error accumulation
# Calculate (mis)perceptions of the permanent shock
PermShkPcvd = self.PermShkNow/pLvlErrNew
PermShkPcvd[self.update] *= self.pLvlErrNow[self.update] # Updaters see the true permanent shock and all missed news
self.pLvlErrNow[self.update] = 1.0
self.PermShkNow = PermShkPcvd | def function[getShocks, parameter[self]]:
constant[
Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but
only consumers who update their macroeconomic beliefs this period incorporate all pre-
viously unnoticed aggregate permanent shocks. Agents correctly observe the level of all
real variables (market resources, consumption, assets, etc), but misperceive the aggregate
productivity level.
Parameters
----------
None
Returns
-------
None
]
call[call[name[super], parameter[name[self].__class__, name[self]]].getShocks, parameter[]]
variable[newborns] assign[=] compare[name[self].t_age equal[==] constant[0]]
call[name[self].TranShkNow][name[newborns]] assign[=] binary_operation[name[self].TranShkAggNow * name[self].wRteNow]
call[name[self].PermShkNow][name[newborns]] assign[=] name[self].PermShkAggNow
call[name[self].getUpdaters, parameter[]]
variable[pLvlErrNew] assign[=] call[name[self].getpLvlError, parameter[]]
<ast.AugAssign object at 0x7da204564850>
variable[PermShkPcvd] assign[=] binary_operation[name[self].PermShkNow / name[pLvlErrNew]]
<ast.AugAssign object at 0x7da204566f50>
call[name[self].pLvlErrNow][name[self].update] assign[=] constant[1.0]
name[self].PermShkNow assign[=] name[PermShkPcvd] | keyword[def] identifier[getShocks] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[self] . identifier[__class__] , identifier[self] ). identifier[getShocks] ()
identifier[newborns] = identifier[self] . identifier[t_age] == literal[int]
identifier[self] . identifier[TranShkNow] [ identifier[newborns] ]= identifier[self] . identifier[TranShkAggNow] * identifier[self] . identifier[wRteNow]
identifier[self] . identifier[PermShkNow] [ identifier[newborns] ]= identifier[self] . identifier[PermShkAggNow]
identifier[self] . identifier[getUpdaters] ()
identifier[pLvlErrNew] = identifier[self] . identifier[getpLvlError] ()
identifier[self] . identifier[pLvlErrNow] *= identifier[pLvlErrNew]
identifier[PermShkPcvd] = identifier[self] . identifier[PermShkNow] / identifier[pLvlErrNew]
identifier[PermShkPcvd] [ identifier[self] . identifier[update] ]*= identifier[self] . identifier[pLvlErrNow] [ identifier[self] . identifier[update] ]
identifier[self] . identifier[pLvlErrNow] [ identifier[self] . identifier[update] ]= literal[int]
identifier[self] . identifier[PermShkNow] = identifier[PermShkPcvd] | def getShocks(self):
"""
Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but
only consumers who update their macroeconomic beliefs this period incorporate all pre-
viously unnoticed aggregate permanent shocks. Agents correctly observe the level of all
real variables (market resources, consumption, assets, etc), but misperceive the aggregate
productivity level.
Parameters
----------
None
Returns
-------
None
"""
# The strange syntax here is so that both StickyEconsumerType and StickyEmarkovConsumerType
# run the getShocks method of their first superclass: AggShockConsumerType and
# AggShockMarkovConsumerType respectively. This will be simplified in Python 3.
super(self.__class__, self).getShocks() # Get permanent and transitory combined shocks
newborns = self.t_age == 0
self.TranShkNow[newborns] = self.TranShkAggNow * self.wRteNow # Turn off idiosyncratic shocks for newborns
self.PermShkNow[newborns] = self.PermShkAggNow
self.getUpdaters() # Randomly draw which agents will update their beliefs
# Calculate innovation to the productivity level perception error
pLvlErrNew = self.getpLvlError()
self.pLvlErrNow *= pLvlErrNew # Perception error accumulation
# Calculate (mis)perceptions of the permanent shock
PermShkPcvd = self.PermShkNow / pLvlErrNew
PermShkPcvd[self.update] *= self.pLvlErrNow[self.update] # Updaters see the true permanent shock and all missed news
self.pLvlErrNow[self.update] = 1.0
self.PermShkNow = PermShkPcvd |
def createWCSObject(output,default_wcs,imageObjectList):
"""Converts a PyWCS WCS object into a WCSObject(baseImageObject) instance."""
from . import imageObject
outwcs = imageObject.WCSObject(output)
outwcs.default_wcs = default_wcs
outwcs.wcs = default_wcs.copy()
outwcs.final_wcs = default_wcs.copy()
outwcs.single_wcs = default_wcs.copy()
outwcs.updateContextImage(imageObjectList[0].createContext)
#
# Add exptime information for use with drizzle
#
outwcs._exptime,outwcs._expstart,outwcs._expend = util.compute_texptime(imageObjectList)
outwcs.nimages = util.countImages(imageObjectList)
return outwcs | def function[createWCSObject, parameter[output, default_wcs, imageObjectList]]:
constant[Converts a PyWCS WCS object into a WCSObject(baseImageObject) instance.]
from relative_module[None] import module[imageObject]
variable[outwcs] assign[=] call[name[imageObject].WCSObject, parameter[name[output]]]
name[outwcs].default_wcs assign[=] name[default_wcs]
name[outwcs].wcs assign[=] call[name[default_wcs].copy, parameter[]]
name[outwcs].final_wcs assign[=] call[name[default_wcs].copy, parameter[]]
name[outwcs].single_wcs assign[=] call[name[default_wcs].copy, parameter[]]
call[name[outwcs].updateContextImage, parameter[call[name[imageObjectList]][constant[0]].createContext]]
<ast.Tuple object at 0x7da1b1a7e590> assign[=] call[name[util].compute_texptime, parameter[name[imageObjectList]]]
name[outwcs].nimages assign[=] call[name[util].countImages, parameter[name[imageObjectList]]]
return[name[outwcs]] | keyword[def] identifier[createWCSObject] ( identifier[output] , identifier[default_wcs] , identifier[imageObjectList] ):
literal[string]
keyword[from] . keyword[import] identifier[imageObject]
identifier[outwcs] = identifier[imageObject] . identifier[WCSObject] ( identifier[output] )
identifier[outwcs] . identifier[default_wcs] = identifier[default_wcs]
identifier[outwcs] . identifier[wcs] = identifier[default_wcs] . identifier[copy] ()
identifier[outwcs] . identifier[final_wcs] = identifier[default_wcs] . identifier[copy] ()
identifier[outwcs] . identifier[single_wcs] = identifier[default_wcs] . identifier[copy] ()
identifier[outwcs] . identifier[updateContextImage] ( identifier[imageObjectList] [ literal[int] ]. identifier[createContext] )
identifier[outwcs] . identifier[_exptime] , identifier[outwcs] . identifier[_expstart] , identifier[outwcs] . identifier[_expend] = identifier[util] . identifier[compute_texptime] ( identifier[imageObjectList] )
identifier[outwcs] . identifier[nimages] = identifier[util] . identifier[countImages] ( identifier[imageObjectList] )
keyword[return] identifier[outwcs] | def createWCSObject(output, default_wcs, imageObjectList):
"""Converts a PyWCS WCS object into a WCSObject(baseImageObject) instance."""
from . import imageObject
outwcs = imageObject.WCSObject(output)
outwcs.default_wcs = default_wcs
outwcs.wcs = default_wcs.copy()
outwcs.final_wcs = default_wcs.copy()
outwcs.single_wcs = default_wcs.copy()
outwcs.updateContextImage(imageObjectList[0].createContext)
#
# Add exptime information for use with drizzle
#
(outwcs._exptime, outwcs._expstart, outwcs._expend) = util.compute_texptime(imageObjectList)
outwcs.nimages = util.countImages(imageObjectList)
return outwcs |
def calc_format_info(version, error, mask_pattern):
"""\
Returns the format information for the provided error level and mask patttern.
ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55)
ISO/IEC 18004:2015(E) -- Table C.1 — Valid format information bit sequences (page 80)
:param int version: Version constant
:param int error: Error level constant.
:param int mask_pattern: Mask pattern number.
"""
fmt = mask_pattern
if version > 0:
if error == consts.ERROR_LEVEL_L:
fmt += 0x08
elif error == consts.ERROR_LEVEL_H:
fmt += 0x10
elif error == consts.ERROR_LEVEL_Q:
fmt += 0x18
format_info = consts.FORMAT_INFO[fmt]
else:
fmt += consts.ERROR_LEVEL_TO_MICRO_MAPPING[version][error] << 2
format_info = consts.FORMAT_INFO_MICRO[fmt]
return format_info | def function[calc_format_info, parameter[version, error, mask_pattern]]:
constant[ Returns the format information for the provided error level and mask patttern.
ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55)
ISO/IEC 18004:2015(E) -- Table C.1 — Valid format information bit sequences (page 80)
:param int version: Version constant
:param int error: Error level constant.
:param int mask_pattern: Mask pattern number.
]
variable[fmt] assign[=] name[mask_pattern]
if compare[name[version] greater[>] constant[0]] begin[:]
if compare[name[error] equal[==] name[consts].ERROR_LEVEL_L] begin[:]
<ast.AugAssign object at 0x7da2054a41c0>
variable[format_info] assign[=] call[name[consts].FORMAT_INFO][name[fmt]]
return[name[format_info]] | keyword[def] identifier[calc_format_info] ( identifier[version] , identifier[error] , identifier[mask_pattern] ):
literal[string]
identifier[fmt] = identifier[mask_pattern]
keyword[if] identifier[version] > literal[int] :
keyword[if] identifier[error] == identifier[consts] . identifier[ERROR_LEVEL_L] :
identifier[fmt] += literal[int]
keyword[elif] identifier[error] == identifier[consts] . identifier[ERROR_LEVEL_H] :
identifier[fmt] += literal[int]
keyword[elif] identifier[error] == identifier[consts] . identifier[ERROR_LEVEL_Q] :
identifier[fmt] += literal[int]
identifier[format_info] = identifier[consts] . identifier[FORMAT_INFO] [ identifier[fmt] ]
keyword[else] :
identifier[fmt] += identifier[consts] . identifier[ERROR_LEVEL_TO_MICRO_MAPPING] [ identifier[version] ][ identifier[error] ]<< literal[int]
identifier[format_info] = identifier[consts] . identifier[FORMAT_INFO_MICRO] [ identifier[fmt] ]
keyword[return] identifier[format_info] | def calc_format_info(version, error, mask_pattern):
""" Returns the format information for the provided error level and mask patttern.
ISO/IEC 18004:2015(E) -- 7.9 Format information (page 55)
ISO/IEC 18004:2015(E) -- Table C.1 — Valid format information bit sequences (page 80)
:param int version: Version constant
:param int error: Error level constant.
:param int mask_pattern: Mask pattern number.
"""
fmt = mask_pattern
if version > 0:
if error == consts.ERROR_LEVEL_L:
fmt += 8 # depends on [control=['if'], data=[]]
elif error == consts.ERROR_LEVEL_H:
fmt += 16 # depends on [control=['if'], data=[]]
elif error == consts.ERROR_LEVEL_Q:
fmt += 24 # depends on [control=['if'], data=[]]
format_info = consts.FORMAT_INFO[fmt] # depends on [control=['if'], data=[]]
else:
fmt += consts.ERROR_LEVEL_TO_MICRO_MAPPING[version][error] << 2
format_info = consts.FORMAT_INFO_MICRO[fmt]
return format_info |
def _with_ast_loc(f):
"""Wrap a generator function in a decorator to supply line and column
information to the returned Python AST node. Dependency nodes will not
be hydrated, functions whose returns need dependency nodes to be
hydrated should use `_with_ast_loc_deps` below."""
@wraps(f)
def with_lineno_and_col(
ctx: GeneratorContext, node: Node, *args, **kwargs
) -> GeneratedPyAST:
py_ast = f(ctx, node, *args, **kwargs)
return _ast_with_loc(py_ast, node.env)
return with_lineno_and_col | def function[_with_ast_loc, parameter[f]]:
constant[Wrap a generator function in a decorator to supply line and column
information to the returned Python AST node. Dependency nodes will not
be hydrated, functions whose returns need dependency nodes to be
hydrated should use `_with_ast_loc_deps` below.]
def function[with_lineno_and_col, parameter[ctx, node]]:
variable[py_ast] assign[=] call[name[f], parameter[name[ctx], name[node], <ast.Starred object at 0x7da1b03b91e0>]]
return[call[name[_ast_with_loc], parameter[name[py_ast], name[node].env]]]
return[name[with_lineno_and_col]] | keyword[def] identifier[_with_ast_loc] ( identifier[f] ):
literal[string]
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[with_lineno_and_col] (
identifier[ctx] : identifier[GeneratorContext] , identifier[node] : identifier[Node] ,* identifier[args] ,** identifier[kwargs]
)-> identifier[GeneratedPyAST] :
identifier[py_ast] = identifier[f] ( identifier[ctx] , identifier[node] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[_ast_with_loc] ( identifier[py_ast] , identifier[node] . identifier[env] )
keyword[return] identifier[with_lineno_and_col] | def _with_ast_loc(f):
"""Wrap a generator function in a decorator to supply line and column
information to the returned Python AST node. Dependency nodes will not
be hydrated, functions whose returns need dependency nodes to be
hydrated should use `_with_ast_loc_deps` below."""
@wraps(f)
def with_lineno_and_col(ctx: GeneratorContext, node: Node, *args, **kwargs) -> GeneratedPyAST:
py_ast = f(ctx, node, *args, **kwargs)
return _ast_with_loc(py_ast, node.env)
return with_lineno_and_col |
def dispatch(self, *args, **kwargs):
"""This decorator sets this view to have restricted permissions."""
return super(AnimalYearArchive, self).dispatch(*args, **kwargs) | def function[dispatch, parameter[self]]:
constant[This decorator sets this view to have restricted permissions.]
return[call[call[name[super], parameter[name[AnimalYearArchive], name[self]]].dispatch, parameter[<ast.Starred object at 0x7da20e74a770>]]] | keyword[def] identifier[dispatch] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[super] ( identifier[AnimalYearArchive] , identifier[self] ). identifier[dispatch] (* identifier[args] ,** identifier[kwargs] ) | def dispatch(self, *args, **kwargs):
"""This decorator sets this view to have restricted permissions."""
return super(AnimalYearArchive, self).dispatch(*args, **kwargs) |
def validate_json_schema(self):
"""Validate the JSON schema. Return list of errors."""
errors = []
for work in self:
for task in work:
if not task.get_results().validate_json_schema():
errors.append(task)
if not work.get_results().validate_json_schema():
errors.append(work)
if not self.get_results().validate_json_schema():
errors.append(self)
return errors | def function[validate_json_schema, parameter[self]]:
constant[Validate the JSON schema. Return list of errors.]
variable[errors] assign[=] list[[]]
for taget[name[work]] in starred[name[self]] begin[:]
for taget[name[task]] in starred[name[work]] begin[:]
if <ast.UnaryOp object at 0x7da204564a30> begin[:]
call[name[errors].append, parameter[name[task]]]
if <ast.UnaryOp object at 0x7da2045661d0> begin[:]
call[name[errors].append, parameter[name[work]]]
if <ast.UnaryOp object at 0x7da2045666b0> begin[:]
call[name[errors].append, parameter[name[self]]]
return[name[errors]] | keyword[def] identifier[validate_json_schema] ( identifier[self] ):
literal[string]
identifier[errors] =[]
keyword[for] identifier[work] keyword[in] identifier[self] :
keyword[for] identifier[task] keyword[in] identifier[work] :
keyword[if] keyword[not] identifier[task] . identifier[get_results] (). identifier[validate_json_schema] ():
identifier[errors] . identifier[append] ( identifier[task] )
keyword[if] keyword[not] identifier[work] . identifier[get_results] (). identifier[validate_json_schema] ():
identifier[errors] . identifier[append] ( identifier[work] )
keyword[if] keyword[not] identifier[self] . identifier[get_results] (). identifier[validate_json_schema] ():
identifier[errors] . identifier[append] ( identifier[self] )
keyword[return] identifier[errors] | def validate_json_schema(self):
"""Validate the JSON schema. Return list of errors."""
errors = []
for work in self:
for task in work:
if not task.get_results().validate_json_schema():
errors.append(task) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['task']]
if not work.get_results().validate_json_schema():
errors.append(work) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['work']]
if not self.get_results().validate_json_schema():
errors.append(self) # depends on [control=['if'], data=[]]
return errors |
def get_query_dict(self, **kwargs):
"""
function to generate a filter dictionary, in which the key is the
keyword used in django filter function in string form, and the value is
the searched value.
:param kwargs:dict: query dict sent by data tables package
:return: dict: filtering dictionary
"""
total_cols = ensure(int, kwargs.get('total_cols', [0])[0], 0)
mapping = self.mapping
filter_dict = defaultdict(dict)
# set up the starter, since sometimes we start the enumeration from '1'
starter = mapping.keys()[0]
for i in range(starter, total_cols):
key = 'columns[{index}]'.format(index=i)
if kwargs.get(key + '[searchable]', [0])[0] != 'true':
continue
search_value = kwargs.get(key + '[search][value]', [''])[0].strip()
if not search_value:
continue
enum_item = mapping.from_key(i)
filter_obj = enum_item.extra
if type(filter_obj) is tuple and len(filter_obj) == 2:
filter_func, filter_key = filter_obj
filter_dict[filter_func][filter_key] = search_value
elif type(filter_obj) is str:
filter_dict['filter'][filter_obj] = search_value
else:
raise ValueError("Invalid filter key.")
return filter_dict | def function[get_query_dict, parameter[self]]:
constant[
function to generate a filter dictionary, in which the key is the
keyword used in django filter function in string form, and the value is
the searched value.
:param kwargs:dict: query dict sent by data tables package
:return: dict: filtering dictionary
]
variable[total_cols] assign[=] call[name[ensure], parameter[name[int], call[call[name[kwargs].get, parameter[constant[total_cols], list[[<ast.Constant object at 0x7da1b23358d0>]]]]][constant[0]], constant[0]]]
variable[mapping] assign[=] name[self].mapping
variable[filter_dict] assign[=] call[name[defaultdict], parameter[name[dict]]]
variable[starter] assign[=] call[call[name[mapping].keys, parameter[]]][constant[0]]
for taget[name[i]] in starred[call[name[range], parameter[name[starter], name[total_cols]]]] begin[:]
variable[key] assign[=] call[constant[columns[{index}]].format, parameter[]]
if compare[call[call[name[kwargs].get, parameter[binary_operation[name[key] + constant[[searchable]]], list[[<ast.Constant object at 0x7da1b23353c0>]]]]][constant[0]] not_equal[!=] constant[true]] begin[:]
continue
variable[search_value] assign[=] call[call[call[name[kwargs].get, parameter[binary_operation[name[key] + constant[[search][value]]], list[[<ast.Constant object at 0x7da1b24affd0>]]]]][constant[0]].strip, parameter[]]
if <ast.UnaryOp object at 0x7da1b24afa60> begin[:]
continue
variable[enum_item] assign[=] call[name[mapping].from_key, parameter[name[i]]]
variable[filter_obj] assign[=] name[enum_item].extra
if <ast.BoolOp object at 0x7da1b24ada80> begin[:]
<ast.Tuple object at 0x7da1b24ac430> assign[=] name[filter_obj]
call[call[name[filter_dict]][name[filter_func]]][name[filter_key]] assign[=] name[search_value]
return[name[filter_dict]] | keyword[def] identifier[get_query_dict] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[total_cols] = identifier[ensure] ( identifier[int] , identifier[kwargs] . identifier[get] ( literal[string] ,[ literal[int] ])[ literal[int] ], literal[int] )
identifier[mapping] = identifier[self] . identifier[mapping]
identifier[filter_dict] = identifier[defaultdict] ( identifier[dict] )
identifier[starter] = identifier[mapping] . identifier[keys] ()[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[starter] , identifier[total_cols] ):
identifier[key] = literal[string] . identifier[format] ( identifier[index] = identifier[i] )
keyword[if] identifier[kwargs] . identifier[get] ( identifier[key] + literal[string] ,[ literal[int] ])[ literal[int] ]!= literal[string] :
keyword[continue]
identifier[search_value] = identifier[kwargs] . identifier[get] ( identifier[key] + literal[string] ,[ literal[string] ])[ literal[int] ]. identifier[strip] ()
keyword[if] keyword[not] identifier[search_value] :
keyword[continue]
identifier[enum_item] = identifier[mapping] . identifier[from_key] ( identifier[i] )
identifier[filter_obj] = identifier[enum_item] . identifier[extra]
keyword[if] identifier[type] ( identifier[filter_obj] ) keyword[is] identifier[tuple] keyword[and] identifier[len] ( identifier[filter_obj] )== literal[int] :
identifier[filter_func] , identifier[filter_key] = identifier[filter_obj]
identifier[filter_dict] [ identifier[filter_func] ][ identifier[filter_key] ]= identifier[search_value]
keyword[elif] identifier[type] ( identifier[filter_obj] ) keyword[is] identifier[str] :
identifier[filter_dict] [ literal[string] ][ identifier[filter_obj] ]= identifier[search_value]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[filter_dict] | def get_query_dict(self, **kwargs):
"""
function to generate a filter dictionary, in which the key is the
keyword used in django filter function in string form, and the value is
the searched value.
:param kwargs:dict: query dict sent by data tables package
:return: dict: filtering dictionary
"""
total_cols = ensure(int, kwargs.get('total_cols', [0])[0], 0)
mapping = self.mapping
filter_dict = defaultdict(dict)
# set up the starter, since sometimes we start the enumeration from '1'
starter = mapping.keys()[0]
for i in range(starter, total_cols):
key = 'columns[{index}]'.format(index=i)
if kwargs.get(key + '[searchable]', [0])[0] != 'true':
continue # depends on [control=['if'], data=[]]
search_value = kwargs.get(key + '[search][value]', [''])[0].strip()
if not search_value:
continue # depends on [control=['if'], data=[]]
enum_item = mapping.from_key(i)
filter_obj = enum_item.extra
if type(filter_obj) is tuple and len(filter_obj) == 2:
(filter_func, filter_key) = filter_obj
filter_dict[filter_func][filter_key] = search_value # depends on [control=['if'], data=[]]
elif type(filter_obj) is str:
filter_dict['filter'][filter_obj] = search_value # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid filter key.') # depends on [control=['for'], data=['i']]
return filter_dict |
def flash_firmware(self, redfish_inst, file_url):
"""Perform firmware flashing on a redfish system
:param file_url: url to firmware bits.
:param redfish_inst: redfish instance
:raises: IloError, on an error from iLO.
"""
action_data = {
'ImageURI': file_url,
}
target_uri = self._get_firmware_update_element().target_uri
try:
self._conn.post(target_uri, data=action_data)
except sushy.exceptions.SushyError as e:
msg = (('The Redfish controller failed to update firmware '
'with file %(file)s Error %(error)s') %
{'file': file_url, 'error': str(e)})
LOG.debug(msg) # noqa
raise exception.IloError(msg)
self.wait_for_redfish_firmware_update_to_complete(redfish_inst)
try:
state, percent = self.get_firmware_update_progress()
except sushy.exceptions.SushyError as e:
msg = ('Failed to get firmware progress update '
'Error %(error)s' % {'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
if state == "Error":
msg = 'Unable to update firmware'
LOG.debug(msg) # noqa
raise exception.IloError(msg)
elif state == "Unknown":
msg = 'Status of firmware update not known'
LOG.debug(msg) # noqa
else: # "Complete" | "Idle"
LOG.info('Flashing firmware file: %s ... done', file_url) | def function[flash_firmware, parameter[self, redfish_inst, file_url]]:
constant[Perform firmware flashing on a redfish system
:param file_url: url to firmware bits.
:param redfish_inst: redfish instance
:raises: IloError, on an error from iLO.
]
variable[action_data] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf490>], [<ast.Name object at 0x7da18c4cddb0>]]
variable[target_uri] assign[=] call[name[self]._get_firmware_update_element, parameter[]].target_uri
<ast.Try object at 0x7da18c4ccbe0>
call[name[self].wait_for_redfish_firmware_update_to_complete, parameter[name[redfish_inst]]]
<ast.Try object at 0x7da1b1a79ea0>
if compare[name[state] equal[==] constant[Error]] begin[:]
variable[msg] assign[=] constant[Unable to update firmware]
call[name[LOG].debug, parameter[name[msg]]]
<ast.Raise object at 0x7da1b1a79c60> | keyword[def] identifier[flash_firmware] ( identifier[self] , identifier[redfish_inst] , identifier[file_url] ):
literal[string]
identifier[action_data] ={
literal[string] : identifier[file_url] ,
}
identifier[target_uri] = identifier[self] . identifier[_get_firmware_update_element] (). identifier[target_uri]
keyword[try] :
identifier[self] . identifier[_conn] . identifier[post] ( identifier[target_uri] , identifier[data] = identifier[action_data] )
keyword[except] identifier[sushy] . identifier[exceptions] . identifier[SushyError] keyword[as] identifier[e] :
identifier[msg] =(( literal[string]
literal[string] )%
{ literal[string] : identifier[file_url] , literal[string] : identifier[str] ( identifier[e] )})
identifier[LOG] . identifier[debug] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[IloError] ( identifier[msg] )
identifier[self] . identifier[wait_for_redfish_firmware_update_to_complete] ( identifier[redfish_inst] )
keyword[try] :
identifier[state] , identifier[percent] = identifier[self] . identifier[get_firmware_update_progress] ()
keyword[except] identifier[sushy] . identifier[exceptions] . identifier[SushyError] keyword[as] identifier[e] :
identifier[msg] =( literal[string]
literal[string] %{ literal[string] : identifier[str] ( identifier[e] )})
identifier[LOG] . identifier[debug] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[IloError] ( identifier[msg] )
keyword[if] identifier[state] == literal[string] :
identifier[msg] = literal[string]
identifier[LOG] . identifier[debug] ( identifier[msg] )
keyword[raise] identifier[exception] . identifier[IloError] ( identifier[msg] )
keyword[elif] identifier[state] == literal[string] :
identifier[msg] = literal[string]
identifier[LOG] . identifier[debug] ( identifier[msg] )
keyword[else] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[file_url] ) | def flash_firmware(self, redfish_inst, file_url):
"""Perform firmware flashing on a redfish system
:param file_url: url to firmware bits.
:param redfish_inst: redfish instance
:raises: IloError, on an error from iLO.
"""
action_data = {'ImageURI': file_url}
target_uri = self._get_firmware_update_element().target_uri
try:
self._conn.post(target_uri, data=action_data) # depends on [control=['try'], data=[]]
except sushy.exceptions.SushyError as e:
msg = 'The Redfish controller failed to update firmware with file %(file)s Error %(error)s' % {'file': file_url, 'error': str(e)}
LOG.debug(msg) # noqa
raise exception.IloError(msg) # depends on [control=['except'], data=['e']]
self.wait_for_redfish_firmware_update_to_complete(redfish_inst)
try:
(state, percent) = self.get_firmware_update_progress() # depends on [control=['try'], data=[]]
except sushy.exceptions.SushyError as e:
msg = 'Failed to get firmware progress update Error %(error)s' % {'error': str(e)}
LOG.debug(msg)
raise exception.IloError(msg) # depends on [control=['except'], data=['e']]
if state == 'Error':
msg = 'Unable to update firmware'
LOG.debug(msg) # noqa
raise exception.IloError(msg) # depends on [control=['if'], data=[]]
elif state == 'Unknown':
msg = 'Status of firmware update not known'
LOG.debug(msg) # noqa # depends on [control=['if'], data=[]]
else: # "Complete" | "Idle"
LOG.info('Flashing firmware file: %s ... done', file_url) |
def update_text(self):
""" Write the current text, and check for any new text changes.
This also updates the elapsed time.
"""
self.write()
try:
newtext = self.text_queue.get_nowait()
self._text = newtext
except Empty:
pass | def function[update_text, parameter[self]]:
constant[ Write the current text, and check for any new text changes.
This also updates the elapsed time.
]
call[name[self].write, parameter[]]
<ast.Try object at 0x7da1b02bece0> | keyword[def] identifier[update_text] ( identifier[self] ):
literal[string]
identifier[self] . identifier[write] ()
keyword[try] :
identifier[newtext] = identifier[self] . identifier[text_queue] . identifier[get_nowait] ()
identifier[self] . identifier[_text] = identifier[newtext]
keyword[except] identifier[Empty] :
keyword[pass] | def update_text(self):
""" Write the current text, and check for any new text changes.
This also updates the elapsed time.
"""
self.write()
try:
newtext = self.text_queue.get_nowait()
self._text = newtext # depends on [control=['try'], data=[]]
except Empty:
pass # depends on [control=['except'], data=[]] |
def load_cPkl(fpath, verbose=None, n=None):
"""
Loads a pickled file with optional verbosity.
Aims for compatibility between python2 and python3.
TestPickleExtentsSimple:
>>> def makedata_simple():
>>> data = np.empty((500, 2 ** 20), dtype=np.uint8) + 1
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_simple()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleExtentsComplex:
>>> def makedata_complex():
>>> rng = np.random.RandomState(42)
>>> item1 = np.empty((100, 2 ** 20), dtype=np.uint8) + 1
>>> item2 = [np.empty((10, 2 ** 10), dtype=np.uint8) + 1
>>> for a in range(1000)]
>>> item3 = {a: np.empty(int(rng.rand() * 10), dtype=np.int16) + 1
>>> for a in range(100)}
>>> item4 = {np.int32(a): np.empty((int(rng.rand() * 10), 2), dtype=np.float64) + 1
>>> for a in range(200)}
>>> data = {'item1': item1, 'item2': item2,
>>> 'item3': item3, 'item4': item4}
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_complex()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data2 = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleCacher:
>>> memtrack = ut.MemoryTracker()
>>> cacher = ut.Cacher('tmp', cache_dir='.', cfgstr='foo')
>>> data3 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> data4 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> memtrack = ut.MemoryTracker()
>>> fpath = '/home/joncrall/Desktop/smkcache/inva_PZ_Master1VUUIDS((5616)vxihbjwtggyovrto)_vpgwpcafbjkkpjdf.cPkl'
>>> print(ut.get_file_nBytes_str(fpath))
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
def makedata_complex():
data = np.empty((1000, 2 ** 20), dtype=np.uint8)
data[:] = 0
return data
"""
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * load_cPkl(%r)' % (util_path.tail(fpath, n=n),))
try:
with open(fpath, 'rb') as file_:
data = pickle.load(file_)
except UnicodeDecodeError:
if six.PY3:
# try to open python2 pickle
with open(fpath, 'rb') as file_:
data = pickle.load(file_, encoding='latin1')
else:
raise
except ValueError as ex:
if six.PY2:
if ex.message == 'unsupported pickle protocol: 4':
raise ValueError(
'unsupported Python3 pickle protocol 4 '
'in Python2 for fpath=%r' % (fpath,))
else:
raise
else:
raise
return data | def function[load_cPkl, parameter[fpath, verbose, n]]:
constant[
Loads a pickled file with optional verbosity.
Aims for compatibility between python2 and python3.
TestPickleExtentsSimple:
>>> def makedata_simple():
>>> data = np.empty((500, 2 ** 20), dtype=np.uint8) + 1
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_simple()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleExtentsComplex:
>>> def makedata_complex():
>>> rng = np.random.RandomState(42)
>>> item1 = np.empty((100, 2 ** 20), dtype=np.uint8) + 1
>>> item2 = [np.empty((10, 2 ** 10), dtype=np.uint8) + 1
>>> for a in range(1000)]
>>> item3 = {a: np.empty(int(rng.rand() * 10), dtype=np.int16) + 1
>>> for a in range(100)}
>>> item4 = {np.int32(a): np.empty((int(rng.rand() * 10), 2), dtype=np.float64) + 1
>>> for a in range(200)}
>>> data = {'item1': item1, 'item2': item2,
>>> 'item3': item3, 'item4': item4}
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_complex()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data2 = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleCacher:
>>> memtrack = ut.MemoryTracker()
>>> cacher = ut.Cacher('tmp', cache_dir='.', cfgstr='foo')
>>> data3 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> data4 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> memtrack = ut.MemoryTracker()
>>> fpath = '/home/joncrall/Desktop/smkcache/inva_PZ_Master1VUUIDS((5616)vxihbjwtggyovrto)_vpgwpcafbjkkpjdf.cPkl'
>>> print(ut.get_file_nBytes_str(fpath))
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
def makedata_complex():
data = np.empty((1000, 2 ** 20), dtype=np.uint8)
data[:] = 0
return data
]
variable[verbose] assign[=] call[name[_rectify_verb_read], parameter[name[verbose]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[util_io] * load_cPkl(%r)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b24bc790>]]]]]
<ast.Try object at 0x7da1b24beb00>
return[name[data]] | keyword[def] identifier[load_cPkl] ( identifier[fpath] , identifier[verbose] = keyword[None] , identifier[n] = keyword[None] ):
literal[string]
identifier[verbose] = identifier[_rectify_verb_read] ( identifier[verbose] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[util_path] . identifier[tail] ( identifier[fpath] , identifier[n] = identifier[n] ),))
keyword[try] :
keyword[with] identifier[open] ( identifier[fpath] , literal[string] ) keyword[as] identifier[file_] :
identifier[data] = identifier[pickle] . identifier[load] ( identifier[file_] )
keyword[except] identifier[UnicodeDecodeError] :
keyword[if] identifier[six] . identifier[PY3] :
keyword[with] identifier[open] ( identifier[fpath] , literal[string] ) keyword[as] identifier[file_] :
identifier[data] = identifier[pickle] . identifier[load] ( identifier[file_] , identifier[encoding] = literal[string] )
keyword[else] :
keyword[raise]
keyword[except] identifier[ValueError] keyword[as] identifier[ex] :
keyword[if] identifier[six] . identifier[PY2] :
keyword[if] identifier[ex] . identifier[message] == literal[string] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %( identifier[fpath] ,))
keyword[else] :
keyword[raise]
keyword[else] :
keyword[raise]
keyword[return] identifier[data] | def load_cPkl(fpath, verbose=None, n=None):
"""
Loads a pickled file with optional verbosity.
Aims for compatibility between python2 and python3.
TestPickleExtentsSimple:
>>> def makedata_simple():
>>> data = np.empty((500, 2 ** 20), dtype=np.uint8) + 1
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_simple()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleExtentsComplex:
>>> def makedata_complex():
>>> rng = np.random.RandomState(42)
>>> item1 = np.empty((100, 2 ** 20), dtype=np.uint8) + 1
>>> item2 = [np.empty((10, 2 ** 10), dtype=np.uint8) + 1
>>> for a in range(1000)]
>>> item3 = {a: np.empty(int(rng.rand() * 10), dtype=np.int16) + 1
>>> for a in range(100)}
>>> item4 = {np.int32(a): np.empty((int(rng.rand() * 10), 2), dtype=np.float64) + 1
>>> for a in range(200)}
>>> data = {'item1': item1, 'item2': item2,
>>> 'item3': item3, 'item4': item4}
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_complex()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data2 = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleCacher:
>>> memtrack = ut.MemoryTracker()
>>> cacher = ut.Cacher('tmp', cache_dir='.', cfgstr='foo')
>>> data3 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> data4 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> memtrack = ut.MemoryTracker()
>>> fpath = '/home/joncrall/Desktop/smkcache/inva_PZ_Master1VUUIDS((5616)vxihbjwtggyovrto)_vpgwpcafbjkkpjdf.cPkl'
>>> print(ut.get_file_nBytes_str(fpath))
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
def makedata_complex():
data = np.empty((1000, 2 ** 20), dtype=np.uint8)
data[:] = 0
return data
"""
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * load_cPkl(%r)' % (util_path.tail(fpath, n=n),)) # depends on [control=['if'], data=[]]
try:
with open(fpath, 'rb') as file_:
data = pickle.load(file_) # depends on [control=['with'], data=['file_']] # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
if six.PY3:
# try to open python2 pickle
with open(fpath, 'rb') as file_:
data = pickle.load(file_, encoding='latin1') # depends on [control=['with'], data=['file_']] # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=[]]
except ValueError as ex:
if six.PY2:
if ex.message == 'unsupported pickle protocol: 4':
raise ValueError('unsupported Python3 pickle protocol 4 in Python2 for fpath=%r' % (fpath,)) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['ex']]
return data |
def process_grammars(self, grammars):
"""Process provided grammars by parsing them into Python objects."""
for path in self.default_grammars:
grammars.insert(0, open(os.path.relpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.normcase(path)))))
for fo in grammars:
logging.debug("Processing grammar content of %s", fo.name)
self.set_namespace(os.path.splitext(os.path.basename(fo.name))[0])
for line in fo:
self.parse_line(line)
self.handle_empty_line()
self.resolve_xref()
self.calculate_leaf_paths() | def function[process_grammars, parameter[self, grammars]]:
constant[Process provided grammars by parsing them into Python objects.]
for taget[name[path]] in starred[name[self].default_grammars] begin[:]
call[name[grammars].insert, parameter[constant[0], call[name[open], parameter[call[name[os].path.relpath, parameter[call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[__file__]]]]], call[name[os].path.normcase, parameter[name[path]]]]]]]]]]]
for taget[name[fo]] in starred[name[grammars]] begin[:]
call[name[logging].debug, parameter[constant[Processing grammar content of %s], name[fo].name]]
call[name[self].set_namespace, parameter[call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[fo].name]]]]][constant[0]]]]
for taget[name[line]] in starred[name[fo]] begin[:]
call[name[self].parse_line, parameter[name[line]]]
call[name[self].handle_empty_line, parameter[]]
call[name[self].resolve_xref, parameter[]]
call[name[self].calculate_leaf_paths, parameter[]] | keyword[def] identifier[process_grammars] ( identifier[self] , identifier[grammars] ):
literal[string]
keyword[for] identifier[path] keyword[in] identifier[self] . identifier[default_grammars] :
identifier[grammars] . identifier[insert] ( literal[int] , identifier[open] ( identifier[os] . identifier[path] . identifier[relpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] )),
identifier[os] . identifier[path] . identifier[normcase] ( identifier[path] )))))
keyword[for] identifier[fo] keyword[in] identifier[grammars] :
identifier[logging] . identifier[debug] ( literal[string] , identifier[fo] . identifier[name] )
identifier[self] . identifier[set_namespace] ( identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[fo] . identifier[name] ))[ literal[int] ])
keyword[for] identifier[line] keyword[in] identifier[fo] :
identifier[self] . identifier[parse_line] ( identifier[line] )
identifier[self] . identifier[handle_empty_line] ()
identifier[self] . identifier[resolve_xref] ()
identifier[self] . identifier[calculate_leaf_paths] () | def process_grammars(self, grammars):
"""Process provided grammars by parsing them into Python objects."""
for path in self.default_grammars:
grammars.insert(0, open(os.path.relpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.normcase(path))))) # depends on [control=['for'], data=['path']]
for fo in grammars:
logging.debug('Processing grammar content of %s', fo.name)
self.set_namespace(os.path.splitext(os.path.basename(fo.name))[0])
for line in fo:
self.parse_line(line) # depends on [control=['for'], data=['line']]
self.handle_empty_line() # depends on [control=['for'], data=['fo']]
self.resolve_xref()
self.calculate_leaf_paths() |
def get_enclosingmethod(self):
"""
the class.method or class (if the definition is not from within a
method) that encloses the definition of this class. Returns
None if this was not an inner class.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.7
""" # noqa
buff = self.get_attribute("EnclosingMethod")
# TODO:
# Running across classes with data in this attribute like
# 00 06 00 00
# which would be the 6th const for the class name, and the
# zero-th (INVALID) const for method. Maybe this is static
# inner classes?
if buff is None:
return None
# class index, method index
with unpack(buff) as up:
ci, mi = up.unpack_struct(_HH)
result = None
if ci and mi:
enc_class = self.deref_const(ci)
enc_meth, enc_type = self.deref_const(mi)
result = "%s.%s%s" % (enc_class, enc_meth, enc_type)
elif ci:
result = self.deref_const(ci)
return result | def function[get_enclosingmethod, parameter[self]]:
constant[
the class.method or class (if the definition is not from within a
method) that encloses the definition of this class. Returns
None if this was not an inner class.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.7
]
variable[buff] assign[=] call[name[self].get_attribute, parameter[constant[EnclosingMethod]]]
if compare[name[buff] is constant[None]] begin[:]
return[constant[None]]
with call[name[unpack], parameter[name[buff]]] begin[:]
<ast.Tuple object at 0x7da1b0b59840> assign[=] call[name[up].unpack_struct, parameter[name[_HH]]]
variable[result] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b0b588e0> begin[:]
variable[enc_class] assign[=] call[name[self].deref_const, parameter[name[ci]]]
<ast.Tuple object at 0x7da1b0b58bb0> assign[=] call[name[self].deref_const, parameter[name[mi]]]
variable[result] assign[=] binary_operation[constant[%s.%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0b5a3e0>, <ast.Name object at 0x7da1b0b597e0>, <ast.Name object at 0x7da1b0b5b550>]]]
return[name[result]] | keyword[def] identifier[get_enclosingmethod] ( identifier[self] ):
literal[string]
identifier[buff] = identifier[self] . identifier[get_attribute] ( literal[string] )
keyword[if] identifier[buff] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[with] identifier[unpack] ( identifier[buff] ) keyword[as] identifier[up] :
identifier[ci] , identifier[mi] = identifier[up] . identifier[unpack_struct] ( identifier[_HH] )
identifier[result] = keyword[None]
keyword[if] identifier[ci] keyword[and] identifier[mi] :
identifier[enc_class] = identifier[self] . identifier[deref_const] ( identifier[ci] )
identifier[enc_meth] , identifier[enc_type] = identifier[self] . identifier[deref_const] ( identifier[mi] )
identifier[result] = literal[string] %( identifier[enc_class] , identifier[enc_meth] , identifier[enc_type] )
keyword[elif] identifier[ci] :
identifier[result] = identifier[self] . identifier[deref_const] ( identifier[ci] )
keyword[return] identifier[result] | def get_enclosingmethod(self):
"""
the class.method or class (if the definition is not from within a
method) that encloses the definition of this class. Returns
None if this was not an inner class.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.7
""" # noqa
buff = self.get_attribute('EnclosingMethod')
# TODO:
# Running across classes with data in this attribute like
# 00 06 00 00
# which would be the 6th const for the class name, and the
# zero-th (INVALID) const for method. Maybe this is static
# inner classes?
if buff is None:
return None # depends on [control=['if'], data=[]]
# class index, method index
with unpack(buff) as up:
(ci, mi) = up.unpack_struct(_HH) # depends on [control=['with'], data=['up']]
result = None
if ci and mi:
enc_class = self.deref_const(ci)
(enc_meth, enc_type) = self.deref_const(mi)
result = '%s.%s%s' % (enc_class, enc_meth, enc_type) # depends on [control=['if'], data=[]]
elif ci:
result = self.deref_const(ci) # depends on [control=['if'], data=[]]
return result |
def verify_message(self, expected_response, break_in_fail=True):
"""
Verifies that expected_response is found in self.lines.
:param expected_response: response or responses to look for. Must be list or str.
:param break_in_fail: If set to True,
re-raises exceptions caught or if message was not found
:return: True or False
:raises: LookupError if message was not found and break_in_fail was True. Other exceptions
might also be raised through searcher.verify_message.
"""
ok = True
try:
ok = verify_message(self.lines, expected_response)
except (TypeError, LookupError) as inst:
ok = False
if break_in_fail:
raise inst
if ok is False and break_in_fail:
raise LookupError("Unexpected message found")
return ok | def function[verify_message, parameter[self, expected_response, break_in_fail]]:
constant[
Verifies that expected_response is found in self.lines.
:param expected_response: response or responses to look for. Must be list or str.
:param break_in_fail: If set to True,
re-raises exceptions caught or if message was not found
:return: True or False
:raises: LookupError if message was not found and break_in_fail was True. Other exceptions
might also be raised through searcher.verify_message.
]
variable[ok] assign[=] constant[True]
<ast.Try object at 0x7da1b0ed0eb0>
if <ast.BoolOp object at 0x7da1b0ed1840> begin[:]
<ast.Raise object at 0x7da1b0ed0e80>
return[name[ok]] | keyword[def] identifier[verify_message] ( identifier[self] , identifier[expected_response] , identifier[break_in_fail] = keyword[True] ):
literal[string]
identifier[ok] = keyword[True]
keyword[try] :
identifier[ok] = identifier[verify_message] ( identifier[self] . identifier[lines] , identifier[expected_response] )
keyword[except] ( identifier[TypeError] , identifier[LookupError] ) keyword[as] identifier[inst] :
identifier[ok] = keyword[False]
keyword[if] identifier[break_in_fail] :
keyword[raise] identifier[inst]
keyword[if] identifier[ok] keyword[is] keyword[False] keyword[and] identifier[break_in_fail] :
keyword[raise] identifier[LookupError] ( literal[string] )
keyword[return] identifier[ok] | def verify_message(self, expected_response, break_in_fail=True):
"""
Verifies that expected_response is found in self.lines.
:param expected_response: response or responses to look for. Must be list or str.
:param break_in_fail: If set to True,
re-raises exceptions caught or if message was not found
:return: True or False
:raises: LookupError if message was not found and break_in_fail was True. Other exceptions
might also be raised through searcher.verify_message.
"""
ok = True
try:
ok = verify_message(self.lines, expected_response) # depends on [control=['try'], data=[]]
except (TypeError, LookupError) as inst:
ok = False
if break_in_fail:
raise inst # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['inst']]
if ok is False and break_in_fail:
raise LookupError('Unexpected message found') # depends on [control=['if'], data=[]]
return ok |
def chrome_driver(self, **kwargs):
"""
supported:
to = timeout, 30
images = load images, 0
"""
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
prefs = {"profile.managed_default_content_settings.images": kwargs.get('images', 0)}
options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(chrome_options=options)
driver.set_window_size(1366, 700)
driver.set_window_position(32, 0)
driver.set_page_load_timeout(kwargs.get('to', 30))
return driver | def function[chrome_driver, parameter[self]]:
constant[
supported:
to = timeout, 30
images = load images, 0
]
variable[options] assign[=] call[name[webdriver].ChromeOptions, parameter[]]
call[name[options].add_argument, parameter[constant[--no-sandbox]]]
variable[prefs] assign[=] dictionary[[<ast.Constant object at 0x7da1b2347520>], [<ast.Call object at 0x7da1b2346dd0>]]
call[name[options].add_experimental_option, parameter[constant[prefs], name[prefs]]]
variable[driver] assign[=] call[name[webdriver].Chrome, parameter[]]
call[name[driver].set_window_size, parameter[constant[1366], constant[700]]]
call[name[driver].set_window_position, parameter[constant[32], constant[0]]]
call[name[driver].set_page_load_timeout, parameter[call[name[kwargs].get, parameter[constant[to], constant[30]]]]]
return[name[driver]] | keyword[def] identifier[chrome_driver] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[options] = identifier[webdriver] . identifier[ChromeOptions] ()
identifier[options] . identifier[add_argument] ( literal[string] )
identifier[prefs] ={ literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )}
identifier[options] . identifier[add_experimental_option] ( literal[string] , identifier[prefs] )
identifier[driver] = identifier[webdriver] . identifier[Chrome] ( identifier[chrome_options] = identifier[options] )
identifier[driver] . identifier[set_window_size] ( literal[int] , literal[int] )
identifier[driver] . identifier[set_window_position] ( literal[int] , literal[int] )
identifier[driver] . identifier[set_page_load_timeout] ( identifier[kwargs] . identifier[get] ( literal[string] , literal[int] ))
keyword[return] identifier[driver] | def chrome_driver(self, **kwargs):
"""
supported:
to = timeout, 30
images = load images, 0
"""
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
prefs = {'profile.managed_default_content_settings.images': kwargs.get('images', 0)}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=options)
driver.set_window_size(1366, 700)
driver.set_window_position(32, 0)
driver.set_page_load_timeout(kwargs.get('to', 30))
return driver |
def gethash(compiled):
"""Retrieve a hash from a header."""
lines = compiled.splitlines()
if len(lines) < 3 or not lines[2].startswith(hash_prefix):
return None
else:
return lines[2][len(hash_prefix):] | def function[gethash, parameter[compiled]]:
constant[Retrieve a hash from a header.]
variable[lines] assign[=] call[name[compiled].splitlines, parameter[]]
if <ast.BoolOp object at 0x7da18dc07be0> begin[:]
return[constant[None]] | keyword[def] identifier[gethash] ( identifier[compiled] ):
literal[string]
identifier[lines] = identifier[compiled] . identifier[splitlines] ()
keyword[if] identifier[len] ( identifier[lines] )< literal[int] keyword[or] keyword[not] identifier[lines] [ literal[int] ]. identifier[startswith] ( identifier[hash_prefix] ):
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[lines] [ literal[int] ][ identifier[len] ( identifier[hash_prefix] ):] | def gethash(compiled):
"""Retrieve a hash from a header."""
lines = compiled.splitlines()
if len(lines) < 3 or not lines[2].startswith(hash_prefix):
return None # depends on [control=['if'], data=[]]
else:
return lines[2][len(hash_prefix):] |
def get_annotation_comment(self, r_term):
"""Returns annotation comment or None if found none or more than one.
Reports errors.
"""
comment_list = list(self.graph.triples((r_term, RDFS.comment, None)))
if len(comment_list) > 1:
self.error = True
msg = 'Annotation can have at most one comment.'
self.logger.log(msg)
return
else:
return six.text_type(comment_list[0][2]) | def function[get_annotation_comment, parameter[self, r_term]]:
constant[Returns annotation comment or None if found none or more than one.
Reports errors.
]
variable[comment_list] assign[=] call[name[list], parameter[call[name[self].graph.triples, parameter[tuple[[<ast.Name object at 0x7da1b020f760>, <ast.Attribute object at 0x7da1b020cd90>, <ast.Constant object at 0x7da1b020ffa0>]]]]]]
if compare[call[name[len], parameter[name[comment_list]]] greater[>] constant[1]] begin[:]
name[self].error assign[=] constant[True]
variable[msg] assign[=] constant[Annotation can have at most one comment.]
call[name[self].logger.log, parameter[name[msg]]]
return[None] | keyword[def] identifier[get_annotation_comment] ( identifier[self] , identifier[r_term] ):
literal[string]
identifier[comment_list] = identifier[list] ( identifier[self] . identifier[graph] . identifier[triples] (( identifier[r_term] , identifier[RDFS] . identifier[comment] , keyword[None] )))
keyword[if] identifier[len] ( identifier[comment_list] )> literal[int] :
identifier[self] . identifier[error] = keyword[True]
identifier[msg] = literal[string]
identifier[self] . identifier[logger] . identifier[log] ( identifier[msg] )
keyword[return]
keyword[else] :
keyword[return] identifier[six] . identifier[text_type] ( identifier[comment_list] [ literal[int] ][ literal[int] ]) | def get_annotation_comment(self, r_term):
"""Returns annotation comment or None if found none or more than one.
Reports errors.
"""
comment_list = list(self.graph.triples((r_term, RDFS.comment, None)))
if len(comment_list) > 1:
self.error = True
msg = 'Annotation can have at most one comment.'
self.logger.log(msg)
return # depends on [control=['if'], data=[]]
else:
return six.text_type(comment_list[0][2]) |
def update_api_key_description(apiKey, description, region=None, key=None, keyid=None, profile=None):
'''
update the given apiKey with the given description.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.update_api_key_description api_key description
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = _api_key_patch_replace(conn, apiKey, '/description', description)
return {'updated': True, 'apiKey': _convert_datetime_str(response)}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} | def function[update_api_key_description, parameter[apiKey, description, region, key, keyid, profile]]:
constant[
update the given apiKey with the given description.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.update_api_key_description api_key description
]
<ast.Try object at 0x7da1b21638e0> | keyword[def] identifier[update_api_key_description] ( identifier[apiKey] , identifier[description] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
keyword[try] :
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[response] = identifier[_api_key_patch_replace] ( identifier[conn] , identifier[apiKey] , literal[string] , identifier[description] )
keyword[return] { literal[string] : keyword[True] , literal[string] : identifier[_convert_datetime_str] ( identifier[response] )}
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
keyword[return] { literal[string] : keyword[False] , literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )} | def update_api_key_description(apiKey, description, region=None, key=None, keyid=None, profile=None):
"""
update the given apiKey with the given description.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.update_api_key_description api_key description
"""
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = _api_key_patch_replace(conn, apiKey, '/description', description)
return {'updated': True, 'apiKey': _convert_datetime_str(response)} # depends on [control=['try'], data=[]]
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']] |
def finish(self):
"""Finalize the MAR file.
The MAR header, index and signatures need to be updated once we've
finished adding all the files.
"""
# Update the last_offset in the mar header
self.write_header()
# Write out the index of contents
self.write_index()
if not self.use_old_format:
# Refresh the signature
sigs = self.calculate_signatures()
self.write_signatures(sigs) | def function[finish, parameter[self]]:
constant[Finalize the MAR file.
The MAR header, index and signatures need to be updated once we've
finished adding all the files.
]
call[name[self].write_header, parameter[]]
call[name[self].write_index, parameter[]]
if <ast.UnaryOp object at 0x7da20e748640> begin[:]
variable[sigs] assign[=] call[name[self].calculate_signatures, parameter[]]
call[name[self].write_signatures, parameter[name[sigs]]] | keyword[def] identifier[finish] ( identifier[self] ):
literal[string]
identifier[self] . identifier[write_header] ()
identifier[self] . identifier[write_index] ()
keyword[if] keyword[not] identifier[self] . identifier[use_old_format] :
identifier[sigs] = identifier[self] . identifier[calculate_signatures] ()
identifier[self] . identifier[write_signatures] ( identifier[sigs] ) | def finish(self):
"""Finalize the MAR file.
The MAR header, index and signatures need to be updated once we've
finished adding all the files.
"""
# Update the last_offset in the mar header
self.write_header()
# Write out the index of contents
self.write_index()
if not self.use_old_format:
# Refresh the signature
sigs = self.calculate_signatures()
self.write_signatures(sigs) # depends on [control=['if'], data=[]] |
def iterate_with_selected_objects_in_order(analysis_objects: Mapping[Any, Any],
analysis_iterables: Dict[str, Sequence[Any]],
selection: Union[str, Sequence[str]]) -> Iterator[List[Tuple[Any, Any]]]:
""" Iterate over an analysis dictionary, yielding the selected attributes in order.
So if there are three iterables, a, b, and c, if we selected c, then we iterate over a and b,
and return c in the same order each time for each set of values of a and b. As an example, consider
the set of iterables:
.. code-block:: python
>>> a = ["a1", "a2"]
>>> b = ["b1", "b2"]
>>> c = ["c1", "c2"]
then it will effectively return:
.. code-block:: python
>>> for a_val in a:
... for b_val in b:
... for c_val in c:
... obj(a_val, b_val, c_val)
This will yield:
.. code-block:: python
>>> output = list(iterate_with_selected_objects_in_order(..., selection = ["a"]))
[[("a1", "b1", "c1"), ("a2", "b1", "c1")], [("a1", "b2", "c1"), ("a2", "b2", "c1")], ...]
This is particularly nice because we can then select on a set of iterables to be returned without
having to specify the rest of the iterables that we don't really care about.
Args:
analysis_objects: Analysis objects dictionary.
analysis_iterables: Iterables used in constructing the analysis objects.
selection: Selection of analysis selections to return. Can be either a string or a sequence of
selections.
Yields:
object: Matching analysis object.
"""
# Validation
if isinstance(selection, str):
selection = [selection]
# Help out mypy. We don't check if it is a list to allow for other sequences.
assert not isinstance(selection, str)
# We don't want to impact the original analysis iterables when we pop some values below.
analysis_iterables = copy.copy(analysis_iterables)
# Extract the selected iterators from the possible iterators so we can select on them later.
# First, we want want each set of iterators to be of the form:
# {"selection1": [value1, value2, ...], "selection2": [value3, value4, ...]}
selected_iterators = {}
for s in selection:
selected_iterators[s] = analysis_iterables.pop(s)
logger.debug(f"Initial analysis_iterables: {analysis_iterables}")
logger.debug(f"Initial selected_iterators: {selected_iterators}")
# Now, we convert them to the form:
# [[("selection1", value1), ("selection1", value2)], [("selection2", value3), ("selection2", value4)]]
# This allows them to iterated over conveniently via itertools.product(...)
selected_iterators = [[(k, v) for v in values] for k, values in selected_iterators.items()] # type: ignore
analysis_iterables = [[(k, v) for v in values] for k, values in analysis_iterables.items()] # type: ignore
logger.debug(f"Final analysis_iterables: {analysis_iterables}")
logger.debug(f"Final selected_iterators: {selected_iterators}")
# Useful debug information, but too verbose for standard usage.
#logger.debug(f"analysis_iterables product: {list(itertools.product(*analysis_iterables))}")
#logger.debug(f"selected_iterators product: {list(itertools.product(*selected_iterators))}")
for values in itertools.product(*analysis_iterables):
selected_analysis_objects = []
for selected_values in itertools.product(*selected_iterators):
for key_index, obj in analysis_objects.items():
selected_via_analysis_iterables = all(
getattr(key_index, k) == v for k, v in values
)
selected_via_selected_iterators = all(
getattr(key_index, k) == v for k, v in selected_values
)
selected_obj = selected_via_analysis_iterables and selected_via_selected_iterators
if selected_obj:
selected_analysis_objects.append((key_index, obj))
logger.debug(f"Yielding: {selected_analysis_objects}")
yield selected_analysis_objects | def function[iterate_with_selected_objects_in_order, parameter[analysis_objects, analysis_iterables, selection]]:
constant[ Iterate over an analysis dictionary, yielding the selected attributes in order.
So if there are three iterables, a, b, and c, if we selected c, then we iterate over a and b,
and return c in the same order each time for each set of values of a and b. As an example, consider
the set of iterables:
.. code-block:: python
>>> a = ["a1", "a2"]
>>> b = ["b1", "b2"]
>>> c = ["c1", "c2"]
then it will effectively return:
.. code-block:: python
>>> for a_val in a:
... for b_val in b:
... for c_val in c:
... obj(a_val, b_val, c_val)
This will yield:
.. code-block:: python
>>> output = list(iterate_with_selected_objects_in_order(..., selection = ["a"]))
[[("a1", "b1", "c1"), ("a2", "b1", "c1")], [("a1", "b2", "c1"), ("a2", "b2", "c1")], ...]
This is particularly nice because we can then select on a set of iterables to be returned without
having to specify the rest of the iterables that we don't really care about.
Args:
analysis_objects: Analysis objects dictionary.
analysis_iterables: Iterables used in constructing the analysis objects.
selection: Selection of analysis selections to return. Can be either a string or a sequence of
selections.
Yields:
object: Matching analysis object.
]
if call[name[isinstance], parameter[name[selection], name[str]]] begin[:]
variable[selection] assign[=] list[[<ast.Name object at 0x7da1b1d49090>]]
assert[<ast.UnaryOp object at 0x7da1b1d49240>]
variable[analysis_iterables] assign[=] call[name[copy].copy, parameter[name[analysis_iterables]]]
variable[selected_iterators] assign[=] dictionary[[], []]
for taget[name[s]] in starred[name[selection]] begin[:]
call[name[selected_iterators]][name[s]] assign[=] call[name[analysis_iterables].pop, parameter[name[s]]]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b1d4ac50>]]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b1d48c10>]]
variable[selected_iterators] assign[=] <ast.ListComp object at 0x7da1b1d4ab90>
variable[analysis_iterables] assign[=] <ast.ListComp object at 0x7da1b1d4b430>
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b1d4a470>]]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b1d48190>]]
for taget[name[values]] in starred[call[name[itertools].product, parameter[<ast.Starred object at 0x7da1b1d48d90>]]] begin[:]
variable[selected_analysis_objects] assign[=] list[[]]
for taget[name[selected_values]] in starred[call[name[itertools].product, parameter[<ast.Starred object at 0x7da1b1d493c0>]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1d4a3e0>, <ast.Name object at 0x7da1b1d4a890>]]] in starred[call[name[analysis_objects].items, parameter[]]] begin[:]
variable[selected_via_analysis_iterables] assign[=] call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b1d49660>]]
variable[selected_via_selected_iterators] assign[=] call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b1d494b0>]]
variable[selected_obj] assign[=] <ast.BoolOp object at 0x7da1b1d49420>
if name[selected_obj] begin[:]
call[name[selected_analysis_objects].append, parameter[tuple[[<ast.Name object at 0x7da1b1d48ee0>, <ast.Name object at 0x7da1b1d48970>]]]]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b1fbbb20>]]
<ast.Yield object at 0x7da1b1fb9ea0> | keyword[def] identifier[iterate_with_selected_objects_in_order] ( identifier[analysis_objects] : identifier[Mapping] [ identifier[Any] , identifier[Any] ],
identifier[analysis_iterables] : identifier[Dict] [ identifier[str] , identifier[Sequence] [ identifier[Any] ]],
identifier[selection] : identifier[Union] [ identifier[str] , identifier[Sequence] [ identifier[str] ]])-> identifier[Iterator] [ identifier[List] [ identifier[Tuple] [ identifier[Any] , identifier[Any] ]]]:
literal[string]
keyword[if] identifier[isinstance] ( identifier[selection] , identifier[str] ):
identifier[selection] =[ identifier[selection] ]
keyword[assert] keyword[not] identifier[isinstance] ( identifier[selection] , identifier[str] )
identifier[analysis_iterables] = identifier[copy] . identifier[copy] ( identifier[analysis_iterables] )
identifier[selected_iterators] ={}
keyword[for] identifier[s] keyword[in] identifier[selection] :
identifier[selected_iterators] [ identifier[s] ]= identifier[analysis_iterables] . identifier[pop] ( identifier[s] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[selected_iterators] =[[( identifier[k] , identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[values] ] keyword[for] identifier[k] , identifier[values] keyword[in] identifier[selected_iterators] . identifier[items] ()]
identifier[analysis_iterables] =[[( identifier[k] , identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[values] ] keyword[for] identifier[k] , identifier[values] keyword[in] identifier[analysis_iterables] . identifier[items] ()]
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
keyword[for] identifier[values] keyword[in] identifier[itertools] . identifier[product] (* identifier[analysis_iterables] ):
identifier[selected_analysis_objects] =[]
keyword[for] identifier[selected_values] keyword[in] identifier[itertools] . identifier[product] (* identifier[selected_iterators] ):
keyword[for] identifier[key_index] , identifier[obj] keyword[in] identifier[analysis_objects] . identifier[items] ():
identifier[selected_via_analysis_iterables] = identifier[all] (
identifier[getattr] ( identifier[key_index] , identifier[k] )== identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[values]
)
identifier[selected_via_selected_iterators] = identifier[all] (
identifier[getattr] ( identifier[key_index] , identifier[k] )== identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[selected_values]
)
identifier[selected_obj] = identifier[selected_via_analysis_iterables] keyword[and] identifier[selected_via_selected_iterators]
keyword[if] identifier[selected_obj] :
identifier[selected_analysis_objects] . identifier[append] (( identifier[key_index] , identifier[obj] ))
identifier[logger] . identifier[debug] ( literal[string] )
keyword[yield] identifier[selected_analysis_objects] | def iterate_with_selected_objects_in_order(analysis_objects: Mapping[Any, Any], analysis_iterables: Dict[str, Sequence[Any]], selection: Union[str, Sequence[str]]) -> Iterator[List[Tuple[Any, Any]]]:
""" Iterate over an analysis dictionary, yielding the selected attributes in order.
So if there are three iterables, a, b, and c, if we selected c, then we iterate over a and b,
and return c in the same order each time for each set of values of a and b. As an example, consider
the set of iterables:
.. code-block:: python
>>> a = ["a1", "a2"]
>>> b = ["b1", "b2"]
>>> c = ["c1", "c2"]
then it will effectively return:
.. code-block:: python
>>> for a_val in a:
... for b_val in b:
... for c_val in c:
... obj(a_val, b_val, c_val)
This will yield:
.. code-block:: python
>>> output = list(iterate_with_selected_objects_in_order(..., selection = ["a"]))
[[("a1", "b1", "c1"), ("a2", "b1", "c1")], [("a1", "b2", "c1"), ("a2", "b2", "c1")], ...]
This is particularly nice because we can then select on a set of iterables to be returned without
having to specify the rest of the iterables that we don't really care about.
Args:
analysis_objects: Analysis objects dictionary.
analysis_iterables: Iterables used in constructing the analysis objects.
selection: Selection of analysis selections to return. Can be either a string or a sequence of
selections.
Yields:
object: Matching analysis object.
"""
# Validation
if isinstance(selection, str):
selection = [selection] # depends on [control=['if'], data=[]]
# Help out mypy. We don't check if it is a list to allow for other sequences.
assert not isinstance(selection, str)
# We don't want to impact the original analysis iterables when we pop some values below.
analysis_iterables = copy.copy(analysis_iterables)
# Extract the selected iterators from the possible iterators so we can select on them later.
# First, we want want each set of iterators to be of the form:
# {"selection1": [value1, value2, ...], "selection2": [value3, value4, ...]}
selected_iterators = {}
for s in selection:
selected_iterators[s] = analysis_iterables.pop(s) # depends on [control=['for'], data=['s']]
logger.debug(f'Initial analysis_iterables: {analysis_iterables}')
logger.debug(f'Initial selected_iterators: {selected_iterators}')
# Now, we convert them to the form:
# [[("selection1", value1), ("selection1", value2)], [("selection2", value3), ("selection2", value4)]]
# This allows them to iterated over conveniently via itertools.product(...)
selected_iterators = [[(k, v) for v in values] for (k, values) in selected_iterators.items()] # type: ignore
analysis_iterables = [[(k, v) for v in values] for (k, values) in analysis_iterables.items()] # type: ignore
logger.debug(f'Final analysis_iterables: {analysis_iterables}')
logger.debug(f'Final selected_iterators: {selected_iterators}')
# Useful debug information, but too verbose for standard usage.
#logger.debug(f"analysis_iterables product: {list(itertools.product(*analysis_iterables))}")
#logger.debug(f"selected_iterators product: {list(itertools.product(*selected_iterators))}")
for values in itertools.product(*analysis_iterables):
selected_analysis_objects = []
for selected_values in itertools.product(*selected_iterators):
for (key_index, obj) in analysis_objects.items():
selected_via_analysis_iterables = all((getattr(key_index, k) == v for (k, v) in values))
selected_via_selected_iterators = all((getattr(key_index, k) == v for (k, v) in selected_values))
selected_obj = selected_via_analysis_iterables and selected_via_selected_iterators
if selected_obj:
selected_analysis_objects.append((key_index, obj)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['selected_values']]
logger.debug(f'Yielding: {selected_analysis_objects}')
yield selected_analysis_objects # depends on [control=['for'], data=['values']] |
def predict(self, u=None, B=None, F=None, Q=None):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array
Optional control vector. If not `None`, it is multiplied by B
to create the control input into the system.
B : np.array(dim_x, dim_z), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
F : np.array(dim_x, dim_x), or None
Optional state transition matrix; a value of None
will cause the filter to use `self.F`.
Q : np.array(dim_x, dim_x), scalar, or None
Optional process noise matrix; a value of None will cause the
filter to use `self.Q`.
"""
if B is None:
B = self.B
if F is None:
F = self.F
if Q is None:
Q = self.Q
elif isscalar(Q):
Q = eye(self.dim_x) * Q
# x = Fx + Bu
if B is not None and u is not None:
self.x = dot(F, self.x) + dot(B, u)
else:
self.x = dot(F, self.x)
# P = FPF' + Q
self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q
# save prior
self.x_prior = self.x.copy()
self.P_prior = self.P.copy() | def function[predict, parameter[self, u, B, F, Q]]:
constant[
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array
Optional control vector. If not `None`, it is multiplied by B
to create the control input into the system.
B : np.array(dim_x, dim_z), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
F : np.array(dim_x, dim_x), or None
Optional state transition matrix; a value of None
will cause the filter to use `self.F`.
Q : np.array(dim_x, dim_x), scalar, or None
Optional process noise matrix; a value of None will cause the
filter to use `self.Q`.
]
if compare[name[B] is constant[None]] begin[:]
variable[B] assign[=] name[self].B
if compare[name[F] is constant[None]] begin[:]
variable[F] assign[=] name[self].F
if compare[name[Q] is constant[None]] begin[:]
variable[Q] assign[=] name[self].Q
if <ast.BoolOp object at 0x7da18ede64d0> begin[:]
name[self].x assign[=] binary_operation[call[name[dot], parameter[name[F], name[self].x]] + call[name[dot], parameter[name[B], name[u]]]]
name[self].P assign[=] binary_operation[binary_operation[name[self]._alpha_sq * call[name[dot], parameter[call[name[dot], parameter[name[F], name[self].P]], name[F].T]]] + name[Q]]
name[self].x_prior assign[=] call[name[self].x.copy, parameter[]]
name[self].P_prior assign[=] call[name[self].P.copy, parameter[]] | keyword[def] identifier[predict] ( identifier[self] , identifier[u] = keyword[None] , identifier[B] = keyword[None] , identifier[F] = keyword[None] , identifier[Q] = keyword[None] ):
literal[string]
keyword[if] identifier[B] keyword[is] keyword[None] :
identifier[B] = identifier[self] . identifier[B]
keyword[if] identifier[F] keyword[is] keyword[None] :
identifier[F] = identifier[self] . identifier[F]
keyword[if] identifier[Q] keyword[is] keyword[None] :
identifier[Q] = identifier[self] . identifier[Q]
keyword[elif] identifier[isscalar] ( identifier[Q] ):
identifier[Q] = identifier[eye] ( identifier[self] . identifier[dim_x] )* identifier[Q]
keyword[if] identifier[B] keyword[is] keyword[not] keyword[None] keyword[and] identifier[u] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[x] = identifier[dot] ( identifier[F] , identifier[self] . identifier[x] )+ identifier[dot] ( identifier[B] , identifier[u] )
keyword[else] :
identifier[self] . identifier[x] = identifier[dot] ( identifier[F] , identifier[self] . identifier[x] )
identifier[self] . identifier[P] = identifier[self] . identifier[_alpha_sq] * identifier[dot] ( identifier[dot] ( identifier[F] , identifier[self] . identifier[P] ), identifier[F] . identifier[T] )+ identifier[Q]
identifier[self] . identifier[x_prior] = identifier[self] . identifier[x] . identifier[copy] ()
identifier[self] . identifier[P_prior] = identifier[self] . identifier[P] . identifier[copy] () | def predict(self, u=None, B=None, F=None, Q=None):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array
Optional control vector. If not `None`, it is multiplied by B
to create the control input into the system.
B : np.array(dim_x, dim_z), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
F : np.array(dim_x, dim_x), or None
Optional state transition matrix; a value of None
will cause the filter to use `self.F`.
Q : np.array(dim_x, dim_x), scalar, or None
Optional process noise matrix; a value of None will cause the
filter to use `self.Q`.
"""
if B is None:
B = self.B # depends on [control=['if'], data=['B']]
if F is None:
F = self.F # depends on [control=['if'], data=['F']]
if Q is None:
Q = self.Q # depends on [control=['if'], data=['Q']]
elif isscalar(Q):
Q = eye(self.dim_x) * Q # depends on [control=['if'], data=[]]
# x = Fx + Bu
if B is not None and u is not None:
self.x = dot(F, self.x) + dot(B, u) # depends on [control=['if'], data=[]]
else:
self.x = dot(F, self.x)
# P = FPF' + Q
self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q
# save prior
self.x_prior = self.x.copy()
self.P_prior = self.P.copy() |
def regexp_filter(self_or_cls, pattern):
"""
Builds a parameter filter using the supplied pattern (may be a
general Python regular expression)
"""
def inner_filter(name, p):
name_match = re.search(pattern,name)
if name_match is not None:
return True
doc_match = re.search(pattern,p.doc)
if doc_match is not None:
return True
return False
return inner_filter | def function[regexp_filter, parameter[self_or_cls, pattern]]:
constant[
Builds a parameter filter using the supplied pattern (may be a
general Python regular expression)
]
def function[inner_filter, parameter[name, p]]:
variable[name_match] assign[=] call[name[re].search, parameter[name[pattern], name[name]]]
if compare[name[name_match] is_not constant[None]] begin[:]
return[constant[True]]
variable[doc_match] assign[=] call[name[re].search, parameter[name[pattern], name[p].doc]]
if compare[name[doc_match] is_not constant[None]] begin[:]
return[constant[True]]
return[constant[False]]
return[name[inner_filter]] | keyword[def] identifier[regexp_filter] ( identifier[self_or_cls] , identifier[pattern] ):
literal[string]
keyword[def] identifier[inner_filter] ( identifier[name] , identifier[p] ):
identifier[name_match] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[name] )
keyword[if] identifier[name_match] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[True]
identifier[doc_match] = identifier[re] . identifier[search] ( identifier[pattern] , identifier[p] . identifier[doc] )
keyword[if] identifier[doc_match] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[return] identifier[inner_filter] | def regexp_filter(self_or_cls, pattern):
"""
Builds a parameter filter using the supplied pattern (may be a
general Python regular expression)
"""
def inner_filter(name, p):
name_match = re.search(pattern, name)
if name_match is not None:
return True # depends on [control=['if'], data=[]]
doc_match = re.search(pattern, p.doc)
if doc_match is not None:
return True # depends on [control=['if'], data=[]]
return False
return inner_filter |
def pix_coord(self, pix_coord):
"""
:type pix_coord: list
:param pix_coord: an x,y pixel coordinate, origin = 1
"""
try:
pix_coord = list(pix_coord)
except:
pass
if not isinstance(pix_coord, list) or len(pix_coord) != 2:
raise ValueError("pix_coord needs to be set with an (x,y) coordinate pair, got {}".format(pix_coord))
x, y = pix_coord
if not isinstance(x, Quantity):
x = float(x) * units.pix
if not isinstance(y, Quantity):
y = float(y) * units.pix
self._pix_coord = x, y | def function[pix_coord, parameter[self, pix_coord]]:
constant[
:type pix_coord: list
:param pix_coord: an x,y pixel coordinate, origin = 1
]
<ast.Try object at 0x7da1b1968eb0>
if <ast.BoolOp object at 0x7da1b196a4a0> begin[:]
<ast.Raise object at 0x7da1b196bc70>
<ast.Tuple object at 0x7da1b1969960> assign[=] name[pix_coord]
if <ast.UnaryOp object at 0x7da1b196ab60> begin[:]
variable[x] assign[=] binary_operation[call[name[float], parameter[name[x]]] * name[units].pix]
if <ast.UnaryOp object at 0x7da1b1a3f340> begin[:]
variable[y] assign[=] binary_operation[call[name[float], parameter[name[y]]] * name[units].pix]
name[self]._pix_coord assign[=] tuple[[<ast.Name object at 0x7da1b1a3fa00>, <ast.Name object at 0x7da1b1a3f0d0>]] | keyword[def] identifier[pix_coord] ( identifier[self] , identifier[pix_coord] ):
literal[string]
keyword[try] :
identifier[pix_coord] = identifier[list] ( identifier[pix_coord] )
keyword[except] :
keyword[pass]
keyword[if] keyword[not] identifier[isinstance] ( identifier[pix_coord] , identifier[list] ) keyword[or] identifier[len] ( identifier[pix_coord] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[pix_coord] ))
identifier[x] , identifier[y] = identifier[pix_coord]
keyword[if] keyword[not] identifier[isinstance] ( identifier[x] , identifier[Quantity] ):
identifier[x] = identifier[float] ( identifier[x] )* identifier[units] . identifier[pix]
keyword[if] keyword[not] identifier[isinstance] ( identifier[y] , identifier[Quantity] ):
identifier[y] = identifier[float] ( identifier[y] )* identifier[units] . identifier[pix]
identifier[self] . identifier[_pix_coord] = identifier[x] , identifier[y] | def pix_coord(self, pix_coord):
"""
:type pix_coord: list
:param pix_coord: an x,y pixel coordinate, origin = 1
"""
try:
pix_coord = list(pix_coord) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
if not isinstance(pix_coord, list) or len(pix_coord) != 2:
raise ValueError('pix_coord needs to be set with an (x,y) coordinate pair, got {}'.format(pix_coord)) # depends on [control=['if'], data=[]]
(x, y) = pix_coord
if not isinstance(x, Quantity):
x = float(x) * units.pix # depends on [control=['if'], data=[]]
if not isinstance(y, Quantity):
y = float(y) * units.pix # depends on [control=['if'], data=[]]
self._pix_coord = (x, y) |
def retry_failure_fab_dev_delete(self, tenant_id, fw_data, fw_dict):
"""Retry the failure cases for delete.
This module calls routine in fabric to retry the failure cases for
delete.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
"""
result = fw_data.get('result').split('(')[0]
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
fw_dict['tenant_name'] = name
is_fw_virt = self.is_device_virtual()
if result == fw_constants.RESULT_FW_DELETE_INIT:
if self.fwid_attr[tenant_id].is_fw_drvr_created():
ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if ret:
# Device portion
self.update_fw_db_dev_status(fw_dict.get('fw_id'),
'')
self.fwid_attr[tenant_id].fw_drvr_created(False)
LOG.info("Retry failue dev return success for delete"
" tenant %s", tenant_id)
else:
return
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
ret = self.fabric.retry_failure(tenant_id, name, fw_dict,
is_fw_virt, result)
if not ret:
LOG.error("Retry failure returned fail for tenant %s",
tenant_id)
return
result = fw_constants.RESULT_FW_DELETE_DONE
self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
self.delete_fw(fw_dict.get('fw_id'))
self.fwid_attr[tenant_id].delete_fw(fw_dict.get('fw_id'))
self.tenant_db.del_fw_tenant(fw_dict.get('fw_id')) | def function[retry_failure_fab_dev_delete, parameter[self, tenant_id, fw_data, fw_dict]]:
constant[Retry the failure cases for delete.
This module calls routine in fabric to retry the failure cases for
delete.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
]
variable[result] assign[=] call[call[call[name[fw_data].get, parameter[constant[result]]].split, parameter[constant[(]]]][constant[0]]
variable[name] assign[=] call[name[dfa_dbm].DfaDBMixin.get_project_name, parameter[name[self], name[tenant_id]]]
call[name[fw_dict]][constant[tenant_name]] assign[=] name[name]
variable[is_fw_virt] assign[=] call[name[self].is_device_virtual, parameter[]]
if compare[name[result] equal[==] name[fw_constants].RESULT_FW_DELETE_INIT] begin[:]
if call[call[name[self].fwid_attr][name[tenant_id]].is_fw_drvr_created, parameter[]] begin[:]
variable[ret] assign[=] call[name[self].delete_fw_device, parameter[name[tenant_id], call[name[fw_dict].get, parameter[constant[fw_id]]], name[fw_dict]]]
if name[ret] begin[:]
call[name[self].update_fw_db_dev_status, parameter[call[name[fw_dict].get, parameter[constant[fw_id]]], constant[]]]
call[call[name[self].fwid_attr][name[tenant_id]].fw_drvr_created, parameter[constant[False]]]
call[name[LOG].info, parameter[constant[Retry failue dev return success for delete tenant %s], name[tenant_id]]]
variable[name] assign[=] call[name[dfa_dbm].DfaDBMixin.get_project_name, parameter[name[self], name[tenant_id]]]
variable[ret] assign[=] call[name[self].fabric.retry_failure, parameter[name[tenant_id], name[name], name[fw_dict], name[is_fw_virt], name[result]]]
if <ast.UnaryOp object at 0x7da1b1a5cb20> begin[:]
call[name[LOG].error, parameter[constant[Retry failure returned fail for tenant %s], name[tenant_id]]]
return[None]
variable[result] assign[=] name[fw_constants].RESULT_FW_DELETE_DONE
call[name[self].update_fw_db_final_result, parameter[call[name[fw_dict].get, parameter[constant[fw_id]]], name[result]]]
call[name[self].delete_fw, parameter[call[name[fw_dict].get, parameter[constant[fw_id]]]]]
call[call[name[self].fwid_attr][name[tenant_id]].delete_fw, parameter[call[name[fw_dict].get, parameter[constant[fw_id]]]]]
call[name[self].tenant_db.del_fw_tenant, parameter[call[name[fw_dict].get, parameter[constant[fw_id]]]]] | keyword[def] identifier[retry_failure_fab_dev_delete] ( identifier[self] , identifier[tenant_id] , identifier[fw_data] , identifier[fw_dict] ):
literal[string]
identifier[result] = identifier[fw_data] . identifier[get] ( literal[string] ). identifier[split] ( literal[string] )[ literal[int] ]
identifier[name] = identifier[dfa_dbm] . identifier[DfaDBMixin] . identifier[get_project_name] ( identifier[self] , identifier[tenant_id] )
identifier[fw_dict] [ literal[string] ]= identifier[name]
identifier[is_fw_virt] = identifier[self] . identifier[is_device_virtual] ()
keyword[if] identifier[result] == identifier[fw_constants] . identifier[RESULT_FW_DELETE_INIT] :
keyword[if] identifier[self] . identifier[fwid_attr] [ identifier[tenant_id] ]. identifier[is_fw_drvr_created] ():
identifier[ret] = identifier[self] . identifier[delete_fw_device] ( identifier[tenant_id] , identifier[fw_dict] . identifier[get] ( literal[string] ),
identifier[fw_dict] )
keyword[if] identifier[ret] :
identifier[self] . identifier[update_fw_db_dev_status] ( identifier[fw_dict] . identifier[get] ( literal[string] ),
literal[string] )
identifier[self] . identifier[fwid_attr] [ identifier[tenant_id] ]. identifier[fw_drvr_created] ( keyword[False] )
identifier[LOG] . identifier[info] ( literal[string]
literal[string] , identifier[tenant_id] )
keyword[else] :
keyword[return]
identifier[name] = identifier[dfa_dbm] . identifier[DfaDBMixin] . identifier[get_project_name] ( identifier[self] , identifier[tenant_id] )
identifier[ret] = identifier[self] . identifier[fabric] . identifier[retry_failure] ( identifier[tenant_id] , identifier[name] , identifier[fw_dict] ,
identifier[is_fw_virt] , identifier[result] )
keyword[if] keyword[not] identifier[ret] :
identifier[LOG] . identifier[error] ( literal[string] ,
identifier[tenant_id] )
keyword[return]
identifier[result] = identifier[fw_constants] . identifier[RESULT_FW_DELETE_DONE]
identifier[self] . identifier[update_fw_db_final_result] ( identifier[fw_dict] . identifier[get] ( literal[string] ), identifier[result] )
identifier[self] . identifier[delete_fw] ( identifier[fw_dict] . identifier[get] ( literal[string] ))
identifier[self] . identifier[fwid_attr] [ identifier[tenant_id] ]. identifier[delete_fw] ( identifier[fw_dict] . identifier[get] ( literal[string] ))
identifier[self] . identifier[tenant_db] . identifier[del_fw_tenant] ( identifier[fw_dict] . identifier[get] ( literal[string] )) | def retry_failure_fab_dev_delete(self, tenant_id, fw_data, fw_dict):
"""Retry the failure cases for delete.
This module calls routine in fabric to retry the failure cases for
delete.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
"""
result = fw_data.get('result').split('(')[0]
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
fw_dict['tenant_name'] = name
is_fw_virt = self.is_device_virtual()
if result == fw_constants.RESULT_FW_DELETE_INIT:
if self.fwid_attr[tenant_id].is_fw_drvr_created():
ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict)
if ret:
# Device portion
self.update_fw_db_dev_status(fw_dict.get('fw_id'), '')
self.fwid_attr[tenant_id].fw_drvr_created(False)
LOG.info('Retry failue dev return success for delete tenant %s', tenant_id) # depends on [control=['if'], data=[]]
else:
return # depends on [control=['if'], data=[]]
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
ret = self.fabric.retry_failure(tenant_id, name, fw_dict, is_fw_virt, result)
if not ret:
LOG.error('Retry failure returned fail for tenant %s', tenant_id)
return # depends on [control=['if'], data=[]]
result = fw_constants.RESULT_FW_DELETE_DONE
self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
self.delete_fw(fw_dict.get('fw_id'))
self.fwid_attr[tenant_id].delete_fw(fw_dict.get('fw_id'))
self.tenant_db.del_fw_tenant(fw_dict.get('fw_id')) # depends on [control=['if'], data=['result']] |
def int_0_inf(cls, string):
'''Convert string to int.
If ``inf`` is supplied, it returns ``0``.
'''
if string == 'inf':
return 0
try:
value = int(string)
except ValueError as error:
raise argparse.ArgumentTypeError(error)
if value < 0:
raise argparse.ArgumentTypeError(_('Value must not be negative.'))
else:
return value | def function[int_0_inf, parameter[cls, string]]:
constant[Convert string to int.
If ``inf`` is supplied, it returns ``0``.
]
if compare[name[string] equal[==] constant[inf]] begin[:]
return[constant[0]]
<ast.Try object at 0x7da20cabcd60>
if compare[name[value] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da20cabdff0> | keyword[def] identifier[int_0_inf] ( identifier[cls] , identifier[string] ):
literal[string]
keyword[if] identifier[string] == literal[string] :
keyword[return] literal[int]
keyword[try] :
identifier[value] = identifier[int] ( identifier[string] )
keyword[except] identifier[ValueError] keyword[as] identifier[error] :
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( identifier[error] )
keyword[if] identifier[value] < literal[int] :
keyword[raise] identifier[argparse] . identifier[ArgumentTypeError] ( identifier[_] ( literal[string] ))
keyword[else] :
keyword[return] identifier[value] | def int_0_inf(cls, string):
"""Convert string to int.
If ``inf`` is supplied, it returns ``0``.
"""
if string == 'inf':
return 0 # depends on [control=['if'], data=[]]
try:
value = int(string) # depends on [control=['try'], data=[]]
except ValueError as error:
raise argparse.ArgumentTypeError(error) # depends on [control=['except'], data=['error']]
if value < 0:
raise argparse.ArgumentTypeError(_('Value must not be negative.')) # depends on [control=['if'], data=[]]
else:
return value |
def _set_oversampling(self, value):
"""
This is a private method because it's used in the initializer by the
``oversampling``
"""
try:
value = np.atleast_1d(value).astype(float)
if len(value) == 1:
value = np.repeat(value, 2)
except ValueError:
raise ValueError('Oversampling factors must be float')
if np.any(value <= 0):
raise ValueError('Oversampling factors must be greater than 0')
self._oversampling = value | def function[_set_oversampling, parameter[self, value]]:
constant[
This is a private method because it's used in the initializer by the
``oversampling``
]
<ast.Try object at 0x7da1b1148e50>
if call[name[np].any, parameter[compare[name[value] less_or_equal[<=] constant[0]]]] begin[:]
<ast.Raise object at 0x7da1b114b250>
name[self]._oversampling assign[=] name[value] | keyword[def] identifier[_set_oversampling] ( identifier[self] , identifier[value] ):
literal[string]
keyword[try] :
identifier[value] = identifier[np] . identifier[atleast_1d] ( identifier[value] ). identifier[astype] ( identifier[float] )
keyword[if] identifier[len] ( identifier[value] )== literal[int] :
identifier[value] = identifier[np] . identifier[repeat] ( identifier[value] , literal[int] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[np] . identifier[any] ( identifier[value] <= literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_oversampling] = identifier[value] | def _set_oversampling(self, value):
"""
This is a private method because it's used in the initializer by the
``oversampling``
"""
try:
value = np.atleast_1d(value).astype(float)
if len(value) == 1:
value = np.repeat(value, 2) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Oversampling factors must be float') # depends on [control=['except'], data=[]]
if np.any(value <= 0):
raise ValueError('Oversampling factors must be greater than 0') # depends on [control=['if'], data=[]]
self._oversampling = value |
def disown(debug):
"""This function will disown, so the Ardexa service can be restarted"""
# Get the current PID
pid = os.getpid()
cgroup_file = "/proc/" + str(pid) + "/cgroup"
try:
infile = open(cgroup_file, "r")
except IOError:
print("Could not open cgroup file: ", cgroup_file)
return False
# Read each line
for line in infile:
# Check if the line contains "ardexa.service"
if line.find("ardexa.service") == -1:
continue
# if the lines contains "name=", replace it with nothing
line = line.replace("name=", "")
# Split the line by commas
items_list = line.split(':')
accounts = items_list[1]
dir_str = accounts + "/ardexa.disown"
# If accounts is empty, continue
if not accounts:
continue
# Create the dir and all subdirs
full_dir = "/sys/fs/cgroup/" + dir_str
if not os.path.exists(full_dir):
os.makedirs(full_dir)
if debug >= 1:
print("Making directory: ", full_dir)
else:
if debug >= 1:
print("Directory already exists: ", full_dir)
# Add the PID to the file
full_path = full_dir + "/cgroup.procs"
prog_list = ["echo", str(pid), ">", full_path]
run_program(prog_list, debug, True)
# If this item contains a comma, then separate it, and reverse
# some OSes will need cpuacct,cpu reversed to actually work
if accounts.find(",") != -1:
acct_list = accounts.split(',')
accounts = acct_list[1] + "," + acct_list[0]
dir_str = accounts + "/ardexa.disown"
# Create the dir and all subdirs. But it may not work. So use a TRY
full_dir = "/sys/fs/cgroup/" + dir_str
try:
if not os.path.exists(full_dir):
os.makedirs(full_dir)
except:
continue
# Add the PID to the file
full_path = full_dir + "/cgroup.procs"
prog_list = ["echo", str(pid), ">", full_path]
run_program(prog_list, debug, True)
infile.close()
# For debug purposes only
if debug >= 1:
prog_list = ["cat", cgroup_file]
run_program(prog_list, debug, False)
# If there are any "ardexa.service" in the proc file. If so, exit with error
prog_list = ["grep", "-q", "ardexa.service", cgroup_file]
if run_program(prog_list, debug, False):
# There are entries still left in the file
return False
return True | def function[disown, parameter[debug]]:
constant[This function will disown, so the Ardexa service can be restarted]
variable[pid] assign[=] call[name[os].getpid, parameter[]]
variable[cgroup_file] assign[=] binary_operation[binary_operation[constant[/proc/] + call[name[str], parameter[name[pid]]]] + constant[/cgroup]]
<ast.Try object at 0x7da18ede5a50>
for taget[name[line]] in starred[name[infile]] begin[:]
if compare[call[name[line].find, parameter[constant[ardexa.service]]] equal[==] <ast.UnaryOp object at 0x7da20e9b11e0>] begin[:]
continue
variable[line] assign[=] call[name[line].replace, parameter[constant[name=], constant[]]]
variable[items_list] assign[=] call[name[line].split, parameter[constant[:]]]
variable[accounts] assign[=] call[name[items_list]][constant[1]]
variable[dir_str] assign[=] binary_operation[name[accounts] + constant[/ardexa.disown]]
if <ast.UnaryOp object at 0x7da18ede6ec0> begin[:]
continue
variable[full_dir] assign[=] binary_operation[constant[/sys/fs/cgroup/] + name[dir_str]]
if <ast.UnaryOp object at 0x7da18ede4ac0> begin[:]
call[name[os].makedirs, parameter[name[full_dir]]]
if compare[name[debug] greater_or_equal[>=] constant[1]] begin[:]
call[name[print], parameter[constant[Making directory: ], name[full_dir]]]
variable[full_path] assign[=] binary_operation[name[full_dir] + constant[/cgroup.procs]]
variable[prog_list] assign[=] list[[<ast.Constant object at 0x7da18ede6290>, <ast.Call object at 0x7da18ede7160>, <ast.Constant object at 0x7da18ede75e0>, <ast.Name object at 0x7da18ede57e0>]]
call[name[run_program], parameter[name[prog_list], name[debug], constant[True]]]
if compare[call[name[accounts].find, parameter[constant[,]]] not_equal[!=] <ast.UnaryOp object at 0x7da18ede5ab0>] begin[:]
variable[acct_list] assign[=] call[name[accounts].split, parameter[constant[,]]]
variable[accounts] assign[=] binary_operation[binary_operation[call[name[acct_list]][constant[1]] + constant[,]] + call[name[acct_list]][constant[0]]]
variable[dir_str] assign[=] binary_operation[name[accounts] + constant[/ardexa.disown]]
variable[full_dir] assign[=] binary_operation[constant[/sys/fs/cgroup/] + name[dir_str]]
<ast.Try object at 0x7da18ede6b90>
variable[full_path] assign[=] binary_operation[name[full_dir] + constant[/cgroup.procs]]
variable[prog_list] assign[=] list[[<ast.Constant object at 0x7da18ede6e00>, <ast.Call object at 0x7da18ede52a0>, <ast.Constant object at 0x7da1afe50340>, <ast.Name object at 0x7da1afe52950>]]
call[name[run_program], parameter[name[prog_list], name[debug], constant[True]]]
call[name[infile].close, parameter[]]
if compare[name[debug] greater_or_equal[>=] constant[1]] begin[:]
variable[prog_list] assign[=] list[[<ast.Constant object at 0x7da20c76d1b0>, <ast.Name object at 0x7da20c76da20>]]
call[name[run_program], parameter[name[prog_list], name[debug], constant[False]]]
variable[prog_list] assign[=] list[[<ast.Constant object at 0x7da20c76e080>, <ast.Constant object at 0x7da20c76d8a0>, <ast.Constant object at 0x7da20c76f0d0>, <ast.Name object at 0x7da20c76e680>]]
if call[name[run_program], parameter[name[prog_list], name[debug], constant[False]]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[disown] ( identifier[debug] ):
literal[string]
identifier[pid] = identifier[os] . identifier[getpid] ()
identifier[cgroup_file] = literal[string] + identifier[str] ( identifier[pid] )+ literal[string]
keyword[try] :
identifier[infile] = identifier[open] ( identifier[cgroup_file] , literal[string] )
keyword[except] identifier[IOError] :
identifier[print] ( literal[string] , identifier[cgroup_file] )
keyword[return] keyword[False]
keyword[for] identifier[line] keyword[in] identifier[infile] :
keyword[if] identifier[line] . identifier[find] ( literal[string] )==- literal[int] :
keyword[continue]
identifier[line] = identifier[line] . identifier[replace] ( literal[string] , literal[string] )
identifier[items_list] = identifier[line] . identifier[split] ( literal[string] )
identifier[accounts] = identifier[items_list] [ literal[int] ]
identifier[dir_str] = identifier[accounts] + literal[string]
keyword[if] keyword[not] identifier[accounts] :
keyword[continue]
identifier[full_dir] = literal[string] + identifier[dir_str]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[full_dir] ):
identifier[os] . identifier[makedirs] ( identifier[full_dir] )
keyword[if] identifier[debug] >= literal[int] :
identifier[print] ( literal[string] , identifier[full_dir] )
keyword[else] :
keyword[if] identifier[debug] >= literal[int] :
identifier[print] ( literal[string] , identifier[full_dir] )
identifier[full_path] = identifier[full_dir] + literal[string]
identifier[prog_list] =[ literal[string] , identifier[str] ( identifier[pid] ), literal[string] , identifier[full_path] ]
identifier[run_program] ( identifier[prog_list] , identifier[debug] , keyword[True] )
keyword[if] identifier[accounts] . identifier[find] ( literal[string] )!=- literal[int] :
identifier[acct_list] = identifier[accounts] . identifier[split] ( literal[string] )
identifier[accounts] = identifier[acct_list] [ literal[int] ]+ literal[string] + identifier[acct_list] [ literal[int] ]
identifier[dir_str] = identifier[accounts] + literal[string]
identifier[full_dir] = literal[string] + identifier[dir_str]
keyword[try] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[full_dir] ):
identifier[os] . identifier[makedirs] ( identifier[full_dir] )
keyword[except] :
keyword[continue]
identifier[full_path] = identifier[full_dir] + literal[string]
identifier[prog_list] =[ literal[string] , identifier[str] ( identifier[pid] ), literal[string] , identifier[full_path] ]
identifier[run_program] ( identifier[prog_list] , identifier[debug] , keyword[True] )
identifier[infile] . identifier[close] ()
keyword[if] identifier[debug] >= literal[int] :
identifier[prog_list] =[ literal[string] , identifier[cgroup_file] ]
identifier[run_program] ( identifier[prog_list] , identifier[debug] , keyword[False] )
identifier[prog_list] =[ literal[string] , literal[string] , literal[string] , identifier[cgroup_file] ]
keyword[if] identifier[run_program] ( identifier[prog_list] , identifier[debug] , keyword[False] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def disown(debug):
"""This function will disown, so the Ardexa service can be restarted"""
# Get the current PID
pid = os.getpid()
cgroup_file = '/proc/' + str(pid) + '/cgroup'
try:
infile = open(cgroup_file, 'r') # depends on [control=['try'], data=[]]
except IOError:
print('Could not open cgroup file: ', cgroup_file)
return False # depends on [control=['except'], data=[]]
# Read each line
for line in infile:
# Check if the line contains "ardexa.service"
if line.find('ardexa.service') == -1:
continue # depends on [control=['if'], data=[]]
# if the lines contains "name=", replace it with nothing
line = line.replace('name=', '')
# Split the line by commas
items_list = line.split(':')
accounts = items_list[1]
dir_str = accounts + '/ardexa.disown'
# If accounts is empty, continue
if not accounts:
continue # depends on [control=['if'], data=[]]
# Create the dir and all subdirs
full_dir = '/sys/fs/cgroup/' + dir_str
if not os.path.exists(full_dir):
os.makedirs(full_dir)
if debug >= 1:
print('Making directory: ', full_dir) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif debug >= 1:
print('Directory already exists: ', full_dir) # depends on [control=['if'], data=[]]
# Add the PID to the file
full_path = full_dir + '/cgroup.procs'
prog_list = ['echo', str(pid), '>', full_path]
run_program(prog_list, debug, True)
# If this item contains a comma, then separate it, and reverse
# some OSes will need cpuacct,cpu reversed to actually work
if accounts.find(',') != -1:
acct_list = accounts.split(',')
accounts = acct_list[1] + ',' + acct_list[0]
dir_str = accounts + '/ardexa.disown'
# Create the dir and all subdirs. But it may not work. So use a TRY
full_dir = '/sys/fs/cgroup/' + dir_str
try:
if not os.path.exists(full_dir):
os.makedirs(full_dir) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
continue # depends on [control=['except'], data=[]]
# Add the PID to the file
full_path = full_dir + '/cgroup.procs'
prog_list = ['echo', str(pid), '>', full_path]
run_program(prog_list, debug, True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
infile.close()
# For debug purposes only
if debug >= 1:
prog_list = ['cat', cgroup_file]
run_program(prog_list, debug, False) # depends on [control=['if'], data=['debug']]
# If there are any "ardexa.service" in the proc file. If so, exit with error
prog_list = ['grep', '-q', 'ardexa.service', cgroup_file]
if run_program(prog_list, debug, False):
# There are entries still left in the file
return False # depends on [control=['if'], data=[]]
return True |
def similar_item_values(self, key, replaces):
"""
Returns a list of values for all variants of the ``key``
in this DAWG according to ``replaces``.
``replaces`` is an object obtained from
``DAWG.compile_replaces(mapping)`` where mapping is a dict
that maps single-char unicode sitrings to another single-char
unicode strings.
"""
return self._similar_item_values(0, key, self.dct.ROOT, replaces) | def function[similar_item_values, parameter[self, key, replaces]]:
constant[
Returns a list of values for all variants of the ``key``
in this DAWG according to ``replaces``.
``replaces`` is an object obtained from
``DAWG.compile_replaces(mapping)`` where mapping is a dict
that maps single-char unicode sitrings to another single-char
unicode strings.
]
return[call[name[self]._similar_item_values, parameter[constant[0], name[key], name[self].dct.ROOT, name[replaces]]]] | keyword[def] identifier[similar_item_values] ( identifier[self] , identifier[key] , identifier[replaces] ):
literal[string]
keyword[return] identifier[self] . identifier[_similar_item_values] ( literal[int] , identifier[key] , identifier[self] . identifier[dct] . identifier[ROOT] , identifier[replaces] ) | def similar_item_values(self, key, replaces):
"""
Returns a list of values for all variants of the ``key``
in this DAWG according to ``replaces``.
``replaces`` is an object obtained from
``DAWG.compile_replaces(mapping)`` where mapping is a dict
that maps single-char unicode sitrings to another single-char
unicode strings.
"""
return self._similar_item_values(0, key, self.dct.ROOT, replaces) |
def PCA(x, n=False):
"""
Principal component analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* `new_x` : matrix with reduced size (lower number of columns)
"""
# select n if not provided
if not n:
n = x.shape[1] - 1
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
assert type(n) == int, "Provided n is not an integer."
assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
# eigen values and eigen vectors of data covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
# sort eigen vectors according biggest eigen value
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - reduced x matrix
return eigen_order[:n].dot(x.T).T | def function[PCA, parameter[x, n]]:
constant[
Principal component analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* `new_x` : matrix with reduced size (lower number of columns)
]
if <ast.UnaryOp object at 0x7da1b0efd720> begin[:]
variable[n] assign[=] binary_operation[call[name[x].shape][constant[1]] - constant[1]]
<ast.Try object at 0x7da1b0efdc00>
assert[compare[call[name[type], parameter[name[n]]] equal[==] name[int]]]
assert[compare[call[name[x].shape][constant[1]] greater[>] name[n]]]
<ast.Tuple object at 0x7da1b0efd240> assign[=] call[name[np].linalg.eig, parameter[call[name[np].cov, parameter[name[x].T]]]]
variable[eigen_order] assign[=] call[name[eigen_vectors].T][call[<ast.UnaryOp object at 0x7da1b0efc880>.argsort, parameter[]]]
return[call[call[name[eigen_order]][<ast.Slice object at 0x7da1b0efc940>].dot, parameter[name[x].T]].T] | keyword[def] identifier[PCA] ( identifier[x] , identifier[n] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[n] :
identifier[n] = identifier[x] . identifier[shape] [ literal[int] ]- literal[int]
keyword[try] :
identifier[x] = identifier[np] . identifier[array] ( identifier[x] )
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[assert] identifier[type] ( identifier[n] )== identifier[int] , literal[string]
keyword[assert] identifier[x] . identifier[shape] [ literal[int] ]> identifier[n] , literal[string]
identifier[eigen_values] , identifier[eigen_vectors] = identifier[np] . identifier[linalg] . identifier[eig] ( identifier[np] . identifier[cov] ( identifier[x] . identifier[T] ))
identifier[eigen_order] = identifier[eigen_vectors] . identifier[T] [(- identifier[eigen_values] ). identifier[argsort] ()]
keyword[return] identifier[eigen_order] [: identifier[n] ]. identifier[dot] ( identifier[x] . identifier[T] ). identifier[T] | def PCA(x, n=False):
"""
Principal component analysis function.
**Args:**
* `x` : input matrix (2d array), every row represents new sample
**Kwargs:**
* `n` : number of features returned (integer) - how many columns
should the output keep
**Returns:**
* `new_x` : matrix with reduced size (lower number of columns)
"""
# select n if not provided
if not n:
n = x.shape[1] - 1 # depends on [control=['if'], data=[]]
# validate inputs
try:
x = np.array(x) # depends on [control=['try'], data=[]]
except:
raise ValueError('Impossible to convert x to a numpy array.') # depends on [control=['except'], data=[]]
assert type(n) == int, 'Provided n is not an integer.'
assert x.shape[1] > n, 'The requested n is bigger than number of features in x.'
# eigen values and eigen vectors of data covariance matrix
(eigen_values, eigen_vectors) = np.linalg.eig(np.cov(x.T))
# sort eigen vectors according biggest eigen value
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - reduced x matrix
return eigen_order[:n].dot(x.T).T |
def create_fw_db(self, fw_id, fw_name, tenant_id):
"""Create FW dict. """
fw_dict = {'fw_id': fw_id, 'name': fw_name, 'tenant_id': tenant_id}
# FW DB is already created by FW Mgr
# self.add_fw_db(fw_id, fw_dict)
self.update_fw_dict(fw_dict) | def function[create_fw_db, parameter[self, fw_id, fw_name, tenant_id]]:
constant[Create FW dict. ]
variable[fw_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b1b6d0>, <ast.Constant object at 0x7da1b1b1a0e0>, <ast.Constant object at 0x7da1b1b1a8f0>], [<ast.Name object at 0x7da1b1b1a050>, <ast.Name object at 0x7da1b1b19bd0>, <ast.Name object at 0x7da1b1b1b790>]]
call[name[self].update_fw_dict, parameter[name[fw_dict]]] | keyword[def] identifier[create_fw_db] ( identifier[self] , identifier[fw_id] , identifier[fw_name] , identifier[tenant_id] ):
literal[string]
identifier[fw_dict] ={ literal[string] : identifier[fw_id] , literal[string] : identifier[fw_name] , literal[string] : identifier[tenant_id] }
identifier[self] . identifier[update_fw_dict] ( identifier[fw_dict] ) | def create_fw_db(self, fw_id, fw_name, tenant_id):
"""Create FW dict. """
fw_dict = {'fw_id': fw_id, 'name': fw_name, 'tenant_id': tenant_id}
# FW DB is already created by FW Mgr
# self.add_fw_db(fw_id, fw_dict)
self.update_fw_dict(fw_dict) |
def connect(self):
"""Attempt to connect to hardware immediately. Will not retry.
Check freshroastsr700.connected or freshroastsr700.connect_state
to verify result.
Raises:
freshroastsr700.exeptions.RoasterLookupError
No hardware connected to the computer.
"""
self._start_connect(self.CA_SINGLE_SHOT)
while(self._connect_state.value == self.CS_ATTEMPTING_CONNECT or
self._connect_state.value == self.CS_CONNECTING):
time.sleep(0.1)
if self.CS_CONNECTED != self._connect_state.value:
raise exceptions.RoasterLookupError | def function[connect, parameter[self]]:
constant[Attempt to connect to hardware immediately. Will not retry.
Check freshroastsr700.connected or freshroastsr700.connect_state
to verify result.
Raises:
freshroastsr700.exeptions.RoasterLookupError
No hardware connected to the computer.
]
call[name[self]._start_connect, parameter[name[self].CA_SINGLE_SHOT]]
while <ast.BoolOp object at 0x7da18bccb8b0> begin[:]
call[name[time].sleep, parameter[constant[0.1]]]
if compare[name[self].CS_CONNECTED not_equal[!=] name[self]._connect_state.value] begin[:]
<ast.Raise object at 0x7da1b11ef880> | keyword[def] identifier[connect] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_start_connect] ( identifier[self] . identifier[CA_SINGLE_SHOT] )
keyword[while] ( identifier[self] . identifier[_connect_state] . identifier[value] == identifier[self] . identifier[CS_ATTEMPTING_CONNECT] keyword[or]
identifier[self] . identifier[_connect_state] . identifier[value] == identifier[self] . identifier[CS_CONNECTING] ):
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] identifier[self] . identifier[CS_CONNECTED] != identifier[self] . identifier[_connect_state] . identifier[value] :
keyword[raise] identifier[exceptions] . identifier[RoasterLookupError] | def connect(self):
"""Attempt to connect to hardware immediately. Will not retry.
Check freshroastsr700.connected or freshroastsr700.connect_state
to verify result.
Raises:
freshroastsr700.exeptions.RoasterLookupError
No hardware connected to the computer.
"""
self._start_connect(self.CA_SINGLE_SHOT)
while self._connect_state.value == self.CS_ATTEMPTING_CONNECT or self._connect_state.value == self.CS_CONNECTING:
time.sleep(0.1) # depends on [control=['while'], data=[]]
if self.CS_CONNECTED != self._connect_state.value:
raise exceptions.RoasterLookupError # depends on [control=['if'], data=[]] |
def _hash(self, string, hash_type):
"""Hash a string using MPQ's hash function."""
hash_types = {
'TABLE_OFFSET': 0,
'HASH_A': 1,
'HASH_B': 2,
'TABLE': 3
}
seed1 = 0x7FED7FED
seed2 = 0xEEEEEEEE
for ch in string.upper():
if not isinstance(ch, int): ch = ord(ch)
value = self.encryption_table[(hash_types[hash_type] << 8) + ch]
seed1 = (value ^ (seed1 + seed2)) & 0xFFFFFFFF
seed2 = ch + seed1 + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF
return seed1 | def function[_hash, parameter[self, string, hash_type]]:
constant[Hash a string using MPQ's hash function.]
variable[hash_types] assign[=] dictionary[[<ast.Constant object at 0x7da1b265f190>, <ast.Constant object at 0x7da1b265fc10>, <ast.Constant object at 0x7da1b265e230>, <ast.Constant object at 0x7da1b265f130>], [<ast.Constant object at 0x7da1b265e920>, <ast.Constant object at 0x7da1b265f340>, <ast.Constant object at 0x7da1b265f5e0>, <ast.Constant object at 0x7da1b265f280>]]
variable[seed1] assign[=] constant[2146271213]
variable[seed2] assign[=] constant[4008636142]
for taget[name[ch]] in starred[call[name[string].upper, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b265ef50> begin[:]
variable[ch] assign[=] call[name[ord], parameter[name[ch]]]
variable[value] assign[=] call[name[self].encryption_table][binary_operation[binary_operation[call[name[hash_types]][name[hash_type]] <ast.LShift object at 0x7da2590d69e0> constant[8]] + name[ch]]]
variable[seed1] assign[=] binary_operation[binary_operation[name[value] <ast.BitXor object at 0x7da2590d6b00> binary_operation[name[seed1] + name[seed2]]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
variable[seed2] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[ch] + name[seed1]] + name[seed2]] + binary_operation[name[seed2] <ast.LShift object at 0x7da2590d69e0> constant[5]]] + constant[3]] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]
return[name[seed1]] | keyword[def] identifier[_hash] ( identifier[self] , identifier[string] , identifier[hash_type] ):
literal[string]
identifier[hash_types] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int]
}
identifier[seed1] = literal[int]
identifier[seed2] = literal[int]
keyword[for] identifier[ch] keyword[in] identifier[string] . identifier[upper] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[ch] , identifier[int] ): identifier[ch] = identifier[ord] ( identifier[ch] )
identifier[value] = identifier[self] . identifier[encryption_table] [( identifier[hash_types] [ identifier[hash_type] ]<< literal[int] )+ identifier[ch] ]
identifier[seed1] =( identifier[value] ^( identifier[seed1] + identifier[seed2] ))& literal[int]
identifier[seed2] = identifier[ch] + identifier[seed1] + identifier[seed2] +( identifier[seed2] << literal[int] )+ literal[int] & literal[int]
keyword[return] identifier[seed1] | def _hash(self, string, hash_type):
"""Hash a string using MPQ's hash function."""
hash_types = {'TABLE_OFFSET': 0, 'HASH_A': 1, 'HASH_B': 2, 'TABLE': 3}
seed1 = 2146271213
seed2 = 4008636142
for ch in string.upper():
if not isinstance(ch, int):
ch = ord(ch) # depends on [control=['if'], data=[]]
value = self.encryption_table[(hash_types[hash_type] << 8) + ch]
seed1 = (value ^ seed1 + seed2) & 4294967295
seed2 = ch + seed1 + seed2 + (seed2 << 5) + 3 & 4294967295 # depends on [control=['for'], data=['ch']]
return seed1 |
def eval_algorithm(curr, prev):
""" Evaluates OBV
Args:
curr: Dict of current volume and close
prev: Dict of previous OBV and close
Returns:
Float of OBV
"""
if curr['close'] > prev['close']:
v = curr['volume']
elif curr['close'] < prev['close']:
v = curr['volume'] * -1
else:
v = 0
return prev['obv'] + v | def function[eval_algorithm, parameter[curr, prev]]:
constant[ Evaluates OBV
Args:
curr: Dict of current volume and close
prev: Dict of previous OBV and close
Returns:
Float of OBV
]
if compare[call[name[curr]][constant[close]] greater[>] call[name[prev]][constant[close]]] begin[:]
variable[v] assign[=] call[name[curr]][constant[volume]]
return[binary_operation[call[name[prev]][constant[obv]] + name[v]]] | keyword[def] identifier[eval_algorithm] ( identifier[curr] , identifier[prev] ):
literal[string]
keyword[if] identifier[curr] [ literal[string] ]> identifier[prev] [ literal[string] ]:
identifier[v] = identifier[curr] [ literal[string] ]
keyword[elif] identifier[curr] [ literal[string] ]< identifier[prev] [ literal[string] ]:
identifier[v] = identifier[curr] [ literal[string] ]*- literal[int]
keyword[else] :
identifier[v] = literal[int]
keyword[return] identifier[prev] [ literal[string] ]+ identifier[v] | def eval_algorithm(curr, prev):
""" Evaluates OBV
Args:
curr: Dict of current volume and close
prev: Dict of previous OBV and close
Returns:
Float of OBV
"""
if curr['close'] > prev['close']:
v = curr['volume'] # depends on [control=['if'], data=[]]
elif curr['close'] < prev['close']:
v = curr['volume'] * -1 # depends on [control=['if'], data=[]]
else:
v = 0
return prev['obv'] + v |
def from_soup(self,author,soup):
"""
Factory Pattern. Fetches contact data from given soup and builds the object
"""
email = soup.find('span',class_='icon icon-mail').findParent('a').get('href').split(':')[-1] if soup.find('span',class_='icon icon-mail') else ''
facebook = soup.find('span',class_='icon icon-facebook').findParent('a').get('href') if soup.find('span',class_='icon icon-facebook') else ''
twitter = soup.find('span',class_='icon icon-twitter-3').findParent('a').get('href') if soup.find('span',class_='icon icon-twitter-3') else ''
link = soup.find('span',class_='icon icon-link').findParent('a').get('href') if soup.find('span',class_='icon icon-link') else ''
return Contact(email,facebook,twitter,link) | def function[from_soup, parameter[self, author, soup]]:
constant[
Factory Pattern. Fetches contact data from given soup and builds the object
]
variable[email] assign[=] <ast.IfExp object at 0x7da20c76f580>
variable[facebook] assign[=] <ast.IfExp object at 0x7da20c76f490>
variable[twitter] assign[=] <ast.IfExp object at 0x7da20c76ee00>
variable[link] assign[=] <ast.IfExp object at 0x7da20c76f2e0>
return[call[name[Contact], parameter[name[email], name[facebook], name[twitter], name[link]]]] | keyword[def] identifier[from_soup] ( identifier[self] , identifier[author] , identifier[soup] ):
literal[string]
identifier[email] = identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ). identifier[findParent] ( literal[string] ). identifier[get] ( literal[string] ). identifier[split] ( literal[string] )[- literal[int] ] keyword[if] identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ) keyword[else] literal[string]
identifier[facebook] = identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ). identifier[findParent] ( literal[string] ). identifier[get] ( literal[string] ) keyword[if] identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ) keyword[else] literal[string]
identifier[twitter] = identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ). identifier[findParent] ( literal[string] ). identifier[get] ( literal[string] ) keyword[if] identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ) keyword[else] literal[string]
identifier[link] = identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ). identifier[findParent] ( literal[string] ). identifier[get] ( literal[string] ) keyword[if] identifier[soup] . identifier[find] ( literal[string] , identifier[class_] = literal[string] ) keyword[else] literal[string]
keyword[return] identifier[Contact] ( identifier[email] , identifier[facebook] , identifier[twitter] , identifier[link] ) | def from_soup(self, author, soup):
"""
Factory Pattern. Fetches contact data from given soup and builds the object
"""
email = soup.find('span', class_='icon icon-mail').findParent('a').get('href').split(':')[-1] if soup.find('span', class_='icon icon-mail') else ''
facebook = soup.find('span', class_='icon icon-facebook').findParent('a').get('href') if soup.find('span', class_='icon icon-facebook') else ''
twitter = soup.find('span', class_='icon icon-twitter-3').findParent('a').get('href') if soup.find('span', class_='icon icon-twitter-3') else ''
link = soup.find('span', class_='icon icon-link').findParent('a').get('href') if soup.find('span', class_='icon icon-link') else ''
return Contact(email, facebook, twitter, link) |
def namedb_get_all_account_addresses(cur):
"""
TESTING ONLY
get all account addresses
"""
assert BLOCKSTACK_TEST, 'BUG: this method is only available in test mode'
sql = 'SELECT DISTINCT address FROM accounts;'
args = ()
rows = namedb_query_execute(cur, sql, args)
ret = []
for rowdata in rows:
ret.append(rowdata['address'])
return ret | def function[namedb_get_all_account_addresses, parameter[cur]]:
constant[
TESTING ONLY
get all account addresses
]
assert[name[BLOCKSTACK_TEST]]
variable[sql] assign[=] constant[SELECT DISTINCT address FROM accounts;]
variable[args] assign[=] tuple[[]]
variable[rows] assign[=] call[name[namedb_query_execute], parameter[name[cur], name[sql], name[args]]]
variable[ret] assign[=] list[[]]
for taget[name[rowdata]] in starred[name[rows]] begin[:]
call[name[ret].append, parameter[call[name[rowdata]][constant[address]]]]
return[name[ret]] | keyword[def] identifier[namedb_get_all_account_addresses] ( identifier[cur] ):
literal[string]
keyword[assert] identifier[BLOCKSTACK_TEST] , literal[string]
identifier[sql] = literal[string]
identifier[args] =()
identifier[rows] = identifier[namedb_query_execute] ( identifier[cur] , identifier[sql] , identifier[args] )
identifier[ret] =[]
keyword[for] identifier[rowdata] keyword[in] identifier[rows] :
identifier[ret] . identifier[append] ( identifier[rowdata] [ literal[string] ])
keyword[return] identifier[ret] | def namedb_get_all_account_addresses(cur):
"""
TESTING ONLY
get all account addresses
"""
assert BLOCKSTACK_TEST, 'BUG: this method is only available in test mode'
sql = 'SELECT DISTINCT address FROM accounts;'
args = ()
rows = namedb_query_execute(cur, sql, args)
ret = []
for rowdata in rows:
ret.append(rowdata['address']) # depends on [control=['for'], data=['rowdata']]
return ret |
def create_course_completion(self, user_id, payload): # pylint: disable=unused-argument
"""
Send a completion status payload to the Degreed Completion Status endpoint
Args:
user_id: Unused.
payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)
containing completion status fields per Degreed documentation.
Returns:
A tuple containing the status code and the body of the response.
Raises:
HTTPError: if we received a failure response code from Degreed
"""
return self._post(
urljoin(
self.enterprise_configuration.degreed_base_url,
self.global_degreed_config.completion_status_api_path
),
payload,
self.COMPLETION_PROVIDER_SCOPE
) | def function[create_course_completion, parameter[self, user_id, payload]]:
constant[
Send a completion status payload to the Degreed Completion Status endpoint
Args:
user_id: Unused.
payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)
containing completion status fields per Degreed documentation.
Returns:
A tuple containing the status code and the body of the response.
Raises:
HTTPError: if we received a failure response code from Degreed
]
return[call[name[self]._post, parameter[call[name[urljoin], parameter[name[self].enterprise_configuration.degreed_base_url, name[self].global_degreed_config.completion_status_api_path]], name[payload], name[self].COMPLETION_PROVIDER_SCOPE]]] | keyword[def] identifier[create_course_completion] ( identifier[self] , identifier[user_id] , identifier[payload] ):
literal[string]
keyword[return] identifier[self] . identifier[_post] (
identifier[urljoin] (
identifier[self] . identifier[enterprise_configuration] . identifier[degreed_base_url] ,
identifier[self] . identifier[global_degreed_config] . identifier[completion_status_api_path]
),
identifier[payload] ,
identifier[self] . identifier[COMPLETION_PROVIDER_SCOPE]
) | def create_course_completion(self, user_id, payload): # pylint: disable=unused-argument
'\n Send a completion status payload to the Degreed Completion Status endpoint\n\n Args:\n user_id: Unused.\n payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit)\n containing completion status fields per Degreed documentation.\n\n Returns:\n A tuple containing the status code and the body of the response.\n Raises:\n HTTPError: if we received a failure response code from Degreed\n '
return self._post(urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.completion_status_api_path), payload, self.COMPLETION_PROVIDER_SCOPE) |
def get_activity_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the activity administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ActivityAdminSession) - an
``ActivityAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_activity_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` is ``true``.*
"""
if not self.supports_activity_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ActivityAdminSession(proxy=proxy, runtime=self._runtime) | def function[get_activity_admin_session, parameter[self, proxy]]:
constant[Gets the ``OsidSession`` associated with the activity administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ActivityAdminSession) - an
``ActivityAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_activity_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` is ``true``.*
]
if <ast.UnaryOp object at 0x7da20c990580> begin[:]
<ast.Raise object at 0x7da20c991d20>
return[call[name[sessions].ActivityAdminSession, parameter[]]] | keyword[def] identifier[get_activity_admin_session] ( identifier[self] , identifier[proxy] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_activity_admin] ():
keyword[raise] identifier[errors] . identifier[Unimplemented] ()
keyword[return] identifier[sessions] . identifier[ActivityAdminSession] ( identifier[proxy] = identifier[proxy] , identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_activity_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the activity administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ActivityAdminSession) - an
``ActivityAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_activity_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` is ``true``.*
"""
if not self.supports_activity_admin():
raise errors.Unimplemented() # depends on [control=['if'], data=[]]
# pylint: disable=no-member
return sessions.ActivityAdminSession(proxy=proxy, runtime=self._runtime) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.