| code | docstring |
|---|---|
def partial_imap_1to1(func, si_func):
""" a bit messy
DEPRICATE
"""
@functools.wraps(si_func)
def wrapper(input_):
if not util_iter.isiterable(input_):
return func(si_func(input_))
else:
return list(map(func, si_func(input_)))
set_funcname(wrapper, util_str.get_callable_name(func) + '_mapper_' + get_funcname(si_func))
return wrapper
|
A bit messy.
DEPRECATE
|
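A minimal self-contained sketch of what ``partial_imap_1to1`` above does, with a plain stand-in for the ``util_iter.isiterable`` helper (the name-setting utilities are omitted):
import functools

def isiterable(obj):
    # Stand-in for util_iter.isiterable: treat strings as atoms.
    return hasattr(obj, '__iter__') and not isinstance(obj, str)

def make_mapper(func, si_func):
    @functools.wraps(si_func)
    def wrapper(input_):
        if not isiterable(input_):
            return func(si_func(input_))
        return list(map(func, si_func(input_)))
    return wrapper

double_all = make_mapper(lambda x: 2 * x, lambda s: s)
assert double_all(3) == 6
assert double_all([1, 2, 3]) == [2, 4, 6]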
def parameterized_expectations(model, verbose=False, initial_dr=None,
pert_order=1, with_complementarities=True,
grid={}, distribution={},
maxit=100, tol=1e-8, inner_maxit=100,
direct=False):
'''
Find global solution for ``model`` via parameterized expectations.
Controls must be expressed as a direct function of equilibrium objects.
Algorithm iterates over the expectations function in the arbitrage equation.
Parameters
----------
model : NumericModel
``dtcscc`` model to be solved
verbose : boolean
if True, display iterations
initial_dr : decision rule
initial guess for the decision rule
pert_order : {1}
if no initial guess is supplied, the perturbation solution at order
``pert_order`` is used as initial guess
grid : dict
grid options
distribution : dict
distribution options
maxit : int
maximum number of iterations
tol : float
tolerance criterion for successive approximations
inner_maxit : int
maximum number of iterations for the inner solver
direct : boolean
if True, solve with the direct method; if False, solve indirectly
Returns
-------
decision rule :
approximated solution
'''
def vprint(t):
if verbose:
print(t)
g = model.functions['transition']
h = model.functions['expectation']
f = model.functions['arbitrage_exp'] # f(s, x, z, p, out)
parms = model.calibration['parameters']
if direct is True:
d = model.functions['direct_response']
approx = model.get_grid(**grid)
grid = approx.grid
interp_type = approx.interpolation
dr = create_interpolator(approx, interp_type) # Interp for control
expect = create_interpolator(approx, interp_type) # Interp for expectation
distrib = model.get_distribution(**distribution)
nodes, weights = distrib.discretize()
N = grid.shape[0]
if initial_dr is None:
if pert_order == 1:
initial_dr = approximate_controls(model)
if pert_order > 1:
raise Exception("Perturbation order > 1 not supported (yet).")
# Use initial decision rule to find initial expectation function
x_0 = initial_dr(grid)
x_0 = x_0.real # just in case ...
z_0 = np.zeros((N, len(model.symbols['expectations'])))
z_new = np.zeros((N, len(model.symbols['expectations'])))
xxnext = np.zeros((x_0.shape[0], x_0.shape[1], weights.shape[0]))
for i in range(weights.shape[0]):
e = nodes[i, :]
ssnext = g(grid, x_0, e, parms)
xxnext[:, :, i] = initial_dr(ssnext)
z_0 += weights[i]*h(ssnext, xxnext[:, :, i], parms)
t1 = time.time()
it = 0
err = 10
err_0 = 10
verbit = True if verbose == 'full' else False
if with_complementarities is True:
lbfun = model.functions['controls_lb']
ubfun = model.functions['controls_ub']
lb = lbfun(grid, parms)
ub = ubfun(grid, parms)
else:
lb = None
ub = None
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
headline = headline.format('N', ' Error', 'Gain', 'Time')
stars = '-'*len(headline)
print(stars)
print(headline)
print(stars)
# format string for within loop
fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'
while err > tol and it <= maxit:
it += 1
t_start = time.time()
# update interpolation object with current guess for expectation
expect.set_values(z_0)
xxnext[...] = 0
if direct is True:
# Use control as direct function of arbitrage equation
xx = d(grid, expect(grid), parms)
for i in range(weights.shape[0]):
e = nodes[i, :]
ssnext = g(grid, xx, e, parms)
xxnext[:, :, i] = d(ssnext, expect(ssnext), parms)
if with_complementarities is True:
xx = np.minimum(xx, ub)
xx = np.maximum(xx, lb)
for i in range(weights.shape[0]):
xxnext[:, :, i] = np.minimum(xxnext[:, :, i], ub)
xxnext[:, :, i] = np.maximum(xxnext[:, :, i], lb)
else:
# Find control by solving arbitrage equation
def fun(x): return f(grid, x, expect(grid), parms)
sdfun = SerialDifferentiableFunction(fun)
if with_complementarities is True:
[xx, nit] = ncpsolve(sdfun, lb, ub, x_0, verbose=verbit,
maxit=inner_maxit)
dr.set_values(xx) # Update decision rule object
for i in range(weights.shape[0]):
e = nodes[i, :]
ssnext = g(grid, xx, e, parms)
xxnext[:, :, i] = dr(ssnext)
# Make sure x_t+1 is within bounds
xxnext[:, :, i] = np.minimum(xxnext[:, :, i], ub)
xxnext[:, :, i] = np.maximum(xxnext[:, :, i], lb)
else:
[xx, nit] = serial_newton(sdfun, x_0, verbose=verbit)
dr.set_values(xx) # Update decision rule object
for i in range(weights.shape[0]):
e = nodes[i, :]
ssnext = g(grid, xx, e, parms)
xxnext[:, :, i] = dr(ssnext)
# Compute the new expectation function
z_new[...] = 0
for i in range(weights.shape[0]):
e = nodes[i, :]
ssnext = g(grid, xx, e, parms)
z_new += weights[i]*h(ssnext, xxnext[:, :, i], parms)
# update error
err = (abs(z_new - z_0).max())
# Update guess for expectations function and the decision rule
z_0 = z_new.copy()
x_0 = xx
# print error information if `verbose`
err_SA = err/err_0
err_0 = err
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print(fmt_str.format(it, err, err_SA, elapsed))
if it == maxit:
import warnings
warnings.warn(UserWarning("Maximum number of iterations reached"))
# compute final time and do final printout if `verbose`
t2 = time.time()
if verbose:
print(stars)
print('Elapsed: {} seconds.'.format(t2 - t1))
print(stars)
# Interpolation for the decision rule
dr.set_values(x_0)
return dr
|
Find global solution for ``model`` via parameterized expectations.
Controls must be expressed as a direct function of equilibrium objects.
Algorithm iterates over the expectations function in the arbitrage equation.
Parameters
----------
model : NumericModel
``dtcscc`` model to be solved
verbose : boolean
if True, display iterations
initial_dr : decision rule
initial guess for the decision rule
pert_order : {1}
if no initial guess is supplied, the perturbation solution at order
``pert_order`` is used as initial guess
grid : dict
grid options
distribution : dict
distribution options
maxit : int
maximum number of iterations
tol : float
tolerance criterion for successive approximations
inner_maxit : int
maximum number of iterations for the inner solver
direct : boolean
if True, solve with the direct method; if False, solve indirectly
Returns
-------
decision rule :
approximated solution
|
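A hedged usage sketch for ``parameterized_expectations`` above, assuming a dolo-style model loader (the YAML path and the import are assumptions, not part of the source):
from dolo import yaml_import  # assumption: dolo-style loader

model = yaml_import('examples/models/rbc.yaml')  # hypothetical model file
dr = parameterized_expectations(model, verbose=True, direct=False,
                                maxit=200, tol=1e-8)
# dr is an interpolator object; evaluate it on an array of states:
# x = dr(states)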
def hash(self, value):
"""
Compute a hash value for ``value`` using a simple weighted-sum method.
Parameters:
-----------
value: string
the string to hash
Returns:
--------
result
hash code for value
"""
result = 0
for i in range(len(value)):
result += self.seed * result + ord(value[i])
# Bound the hash code by the table capacity.
return result % (self.capacity - 1)
|
Compute a hash value for ``value`` using a simple weighted-sum method.
Parameters:
-----------
value: string
the string to hash
Returns:
--------
result
hash code for value
|
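A minimal self-contained sketch of the weighted-sum hash, with hypothetical ``seed`` and ``capacity`` values:
class WeightedSumHash:
    def __init__(self, seed=31, capacity=1024):  # hypothetical defaults
        self.seed = seed
        self.capacity = capacity

    def hash(self, value):
        result = 0
        for ch in value:
            # Classic polynomial rolling hash over the characters.
            result = self.seed * result + ord(ch)
        # Bound the hash code by the table capacity.
        return result % (self.capacity - 1)

h = WeightedSumHash()
print(h.hash('hello'))  # deterministic index in [0, capacity - 2]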
def run(self, args, options):
"""
This function calls uploadchannel which performs all the run steps:
- Create ChannelNode
- Populate Tree with TopicNodes, ContentNodes, and associated File objects
- .
- ..
- ...
Args:
args (dict): ricecooker command line arguments
options (dict): additional compatibility mode options given on command line
"""
self.pre_run(args, options)
args_and_options = args.copy()
args_and_options.update(options)
uploadchannel(self, **args_and_options)
|
This function calls uploadchannel which performs all the run steps:
- Create ChannelNode
- Populate Tree with TopicNodes, ContentNodes, and associated File objects
- .
- ..
- ...
Args:
args (dict): ricecooker command line arguments
options (dict): additional compatibility mode options given on command line
|
def generate_output_network(self, json_data=None, hr=True, show_name=False,
colorize=True):
"""
The function for generating CLI output of RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
if json_data is None:
json_data = {}
output = generate_output(
line='0',
short=HR_RDAP['network']['_short'] if hr else 'network',
name=HR_RDAP['network']['_name'] if (hr and show_name) else None,
is_parent=True,
colorize=colorize
)
for key, val in json_data['network'].items():
if key in ['links', 'status']:
output += self.generate_output_list(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key in ['notices', 'remarks']:
output += self.generate_output_notices(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key == 'events':
output += self.generate_output_events(
source='network',
key=key,
val=val,
line='1',
hr=hr,
show_name=show_name,
colorize=colorize
)
elif key not in ['raw']:
output += generate_output(
line='1',
short=HR_RDAP['network'][key]['_short'] if hr else key,
name=HR_RDAP['network'][key]['_name'] if (
hr and show_name) else None,
value=val,
colorize=colorize
)
return output
|
The function for generating CLI output of RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
|
def set_text(self, text=None):
"""stub"""
if text is None:
raise NullArgument()
if self.get_text_metadata().is_read_only():
raise NoAccess()
if not self.my_osid_object_form._is_valid_string(
text,
self.get_text_metadata()):
raise InvalidArgument()
self.my_osid_object_form._my_map['text']['text'] = text
|
stub
|
def get_course_current_grades(self, course_id):
"""
Returns a CurrentGradesByCourse object for all users in the specified course.
Args:
course_id (str): an edX course id.
Returns:
CurrentGradesByCourse: object representing the student current grades
Authorization:
The authenticated user must have staff permissions to see grades for all users
in a course.
"""
resp = self.requester.get(
urljoin(
self.base_url,
'/api/grades/v1/courses/{course_key}/'.format(course_key=course_id)
)
)
resp.raise_for_status()
resp_json = resp.json()
if 'results' in resp_json:
grade_entries = [CurrentGrade(entry) for entry in resp_json["results"]]
while resp_json['next'] is not None:
resp = self.requester.get(resp_json['next'])
resp.raise_for_status()
resp_json = resp.json()
grade_entries.extend((CurrentGrade(entry) for entry in resp_json["results"]))
else:
grade_entries = [CurrentGrade(entry) for entry in resp_json]
return CurrentGradesByCourse(grade_entries)
|
Returns a CurrentGradesByCourse object for all users in the specified course.
Args:
course_id (str): an edX course id.
Returns:
CurrentGradesByCourse: object representing the student current grades
Authorization:
The authenticated user must have staff permissions to see grades for all users
in a course.
|
def save_formset(self, request, form, formset, change):
"""
For each photo, set its author to the currently authenticated user.
"""
instances = formset.save(commit=False)
for instance in instances:
if isinstance(instance, Photo):
instance.author = request.user
instance.save()
|
For each photo, set its author to the currently authenticated user.
|
def setdefault(self, key, value):
"""We may not always be connected to an app, but we still need
to provide a way to the base environment to set it's defaults.
"""
try:
super(FlaskConfigStorage, self).setdefault(key, value)
except RuntimeError:
self._defaults.__setitem__(key, value)
|
We may not always be connected to an app, but we still need
to provide a way for the base environment to set its defaults.
|
def synchronize(self, verbose=False):
"""
Synchronizes the Repository information with the directory.
All registered files and directories that are missing from the
directory will be automatically removed from the Repository.
:parameters:
#. verbose (boolean): Whether to warn and inform about any abnormalities.
"""
if self.__path is None:
return
# walk directories
for dirPath in sorted(list(self.walk_directories_relative_path())):
realPath = os.path.join(self.__path, dirPath)
# if directory exist
if os.path.isdir(realPath):
continue
if verbose: warnings.warn("%s directory is missing"%realPath)
# loop to get dirInfoDict
keys = dirPath.split(os.sep)
dirInfoDict = self
for idx in range(len(keys)-1):
dirs = dict.get(dirInfoDict, 'directories', None)
if dirs is None: break
dirInfoDict = dict.get(dirs, keys[idx], None)
if dirInfoDict is None: break
# remove dirInfoDict directory if existing
if dirInfoDict is not None:
dirs = dict.get(dirInfoDict, 'directories', None)
if dirs is not None:
dict.pop( dirs, keys[-1], None )
# walk files
for filePath in sorted(list(self.walk_files_relative_path())):
realPath = os.path.join(self.__path, filePath)
# if file exists
if os.path.isfile( realPath ):
continue
if verbose: warnings.warn("%s file is missing"%realPath)
# loop to get dirInfoDict
keys = filePath.split(os.sep)
dirInfoDict = self
for idx in range(len(keys)-1):
dirs = dict.get(dirInfoDict, 'directories', None)
if dirs is None: break
dirInfoDict = dict.get(dirs, keys[idx], None)
if dirInfoDict is None: break
# remove dirInfoDict file if existing
if dirInfoDict is not None:
files = dict.get(dirInfoDict, 'files', None)
if files is not None:
dict.pop( files, keys[-1], None )
|
Synchronizes the Repository information with the directory.
All registered files and directories that are missing from the
directory will be automatically removed from the Repository.
:parameters:
#. verbose (boolean): Whether to warn and inform about any abnormalities.
|
def _ctab(stream):
"""Process ``Ctab``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
"""
yield CtabBlockStart()
counts_line = stream.popleft()
counts_line_values = [counts_line[i:i + 3].strip() for i in range(0, len(counts_line) - 6, 3)] + \
[counts_line[-6:len(counts_line)].strip()]
ctab_counts_line = CtabCountsLine(*counts_line_values)
yield ctab_counts_line
number_of_atoms = ctab_counts_line.number_of_atoms
number_of_bonds = ctab_counts_line.number_of_bonds
# yield from _ctab_atom_bond_block(number_of_lines=number_of_atoms, block_type=CtabAtomBlockLine, stream=stream)
for token in _ctab_atom_bond_block(number_of_lines=number_of_atoms, block_type=CtabAtomBlockLine, stream=stream):
yield token
# yield from _ctab_atom_bond_block(number_of_lines=number_of_bonds, block_type=CtabBondBlockLine, stream=stream)
for token in _ctab_atom_bond_block(number_of_lines=number_of_bonds, block_type=CtabBondBlockLine, stream=stream):
yield token
# yield from _ctab_property_block(stream=stream)
for token in _ctab_property_block(stream=stream):
yield token
yield CtabBlockEnd()
|
Process ``Ctab``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
|
def image_show(id=None, name=None, profile=None): # pylint: disable=C0103
'''
Return details about a specific image (glance image-show)
CLI Example:
.. code-block:: bash
salt '*' glance.image_show
'''
g_client = _auth(profile)
ret = {}
if name:
for image in g_client.images.list():
if image.name == name:
id = image.id # pylint: disable=C0103
continue
if not id:
return {
'result': False,
'comment':
'Unable to resolve image ID '
'for name \'{0}\''.format(name)
}
try:
image = g_client.images.get(id)
except exc.HTTPNotFound:
return {
'result': False,
'comment': 'No image with ID {0}'.format(id)
}
log.debug(
'Properties of image %s:\n%s',
image.name, pprint.PrettyPrinter(indent=4).pformat(image)
)
schema = image_schema(profile=profile)
if len(schema.keys()) == 1:
schema = schema['image']
for key in schema:
if key in image:
ret[key] = image[key]
return ret
|
Return details about a specific image (glance image-show)
CLI Example:
.. code-block:: bash
salt '*' glance.image_show
|
def getChannel(self, channel_id, **kwargs):
"""
Load all information about a channel and return a custom Channel class.
Calls "getChannel" XML-RPC.
:param channel_id: ``int``, for example 12345, or ``str`` for name.
:returns: deferred that when fired returns a Channel (Munch, dict-like)
object representing this Koji channel, or None if no channel
was found.
"""
channelinfo = yield self.call('getChannel', channel_id, **kwargs)
channel = Channel.fromDict(channelinfo)
if channel:
channel.connection = self
defer.returnValue(channel)
|
Load all information about a channel and return a custom Channel class.
Calls "getChannel" XML-RPC.
:param channel_id: ``int``, for example 12345, or ``str`` for name.
:returns: deferred that when fired returns a Channel (Munch, dict-like)
object representing this Koji channel, or None if no channel
was found.
|
def get_xy(self, xy, addr=True):
"""Get the agent with xy-coordinate in the grid. If *addr* is True,
returns only the agent's address.
If no such agent in the grid, returns None.
:raises:
:exc:`ValueError` if xy-coordinate is outside the environment's
grid.
"""
x = xy[0]
y = xy[1]
if x < self.origin[0] or x >= self.origin[0] + self.gs[0]:
raise ValueError("x-coordinate inappropriate ({})".format(x))
if y < self.origin[1] or y >= self.origin[1] + self.gs[1]:
raise ValueError("y-coordinate inappropriate ({})".format(y))
i = x - self.origin[0]
j = y - self.origin[1]
if addr:
return self.grid[i][j].addr
return self.grid[i][j]
|
Get the agent with xy-coordinate in the grid. If *addr* is True,
returns only the agent's address.
If no such agent in the grid, returns None.
:raises:
:exc:`ValueError` if xy-coordinate is outside the environment's
grid.
|
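A self-contained toy showing the origin-offset indexing used by ``get_xy`` above (class and attribute names are stand-ins):
class ToyGrid:
    def __init__(self, origin, gs):
        self.origin = origin  # (x, y) of the grid's lower corner
        self.gs = gs          # grid size as (width, height)
        self.grid = [[None] * gs[1] for _ in range(gs[0])]

    def get_xy(self, xy):
        x, y = xy
        if not (self.origin[0] <= x < self.origin[0] + self.gs[0]):
            raise ValueError("x-coordinate inappropriate ({})".format(x))
        if not (self.origin[1] <= y < self.origin[1] + self.gs[1]):
            raise ValueError("y-coordinate inappropriate ({})".format(y))
        # Shift world coordinates into zero-based array indices.
        return self.grid[x - self.origin[0]][y - self.origin[1]]

g = ToyGrid(origin=(-2, -2), gs=(5, 5))
print(g.get_xy((0, 0)))  # world (0, 0) maps to cell [2][2] -> None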
def _serialize_datetime(value):
"""Serialize a DateTime object to its proper ISO-8601 representation."""
if not isinstance(value, (datetime, arrow.Arrow)):
raise ValueError(u'The received object was not a datetime: '
u'{} {}'.format(type(value), value))
return value.isoformat()
|
Serialize a DateTime object to its proper ISO-8601 representation.
|
def hmget(key, *fields, **options):
'''
Returns the values of all the given hash fields.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' redis.hmget foo_hash bar_field1 bar_field2
'''
host = options.get('host', None)
port = options.get('port', None)
database = options.get('db', None)
password = options.get('password', None)
server = _connect(host, port, database, password)
return server.hmget(key, *fields)
|
Returns the values of all the given hash fields.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' redis.hmget foo_hash bar_field1 bar_field2
|
def _lookup_abs(self, p, klass, create=1):
"""
Fast (?) lookup of a *normalized* absolute path.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the FS.Entry(), FS.Dir() or FS.File() methods.
The caller is responsible for making sure we're passed a
normalized absolute path; we merely let Python's dictionary look
up and return the One True Node.FS object for the path.
If a Node for the specified "p" doesn't already exist, and
"create" is specified, the Node may be created after recursive
invocation to find or create the parent directory or directories.
"""
k = _my_normcase(p)
try:
result = self._lookupDict[k]
except KeyError:
if not create:
msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
raise SCons.Errors.UserError(msg)
# There is no Node for this path name, and we're allowed
# to create it.
dir_name, file_name = p.rsplit('/',1)
dir_node = self._lookup_abs(dir_name, Dir)
result = klass(file_name, dir_node, self.fs)
# Double-check on disk (as configured) that the Node we
# created matches whatever is out there in the real world.
result.diskcheck_match()
self._lookupDict[k] = result
dir_node.entries[_my_normcase(file_name)] = result
dir_node.implicit = None
else:
# There is already a Node for this path name. Allow it to
# complain if we were looking for an inappropriate type.
result.must_be_same(klass)
return result
|
Fast (?) lookup of a *normalized* absolute path.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the FS.Entry(), FS.Dir() or FS.File() methods.
The caller is responsible for making sure we're passed a
normalized absolute path; we merely let Python's dictionary look
up and return the One True Node.FS object for the path.
If a Node for the specified "p" doesn't already exist, and
"create" is specified, the Node may be created after recursive
invocation to find or create the parent directory or directories.
|
def dskx02(handle, dladsc, vertex, raydir):
"""
Determine the plate ID and body-fixed coordinates of the
intersection of a specified ray with the surface defined by a
type 2 DSK plate model.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskx02_c.html
:param handle: Handle of DSK kernel containing plate model.
:type handle: int
:param dladsc: DLA descriptor of plate model segment.
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:param vertex: Ray's vertex in the body fixed frame.
:type vertex: 3-Element Array of floats
:param raydir: Ray direction in the body fixed frame.
:type raydir: 3-Element Array of floats
:return: ID code of the plate intersected by the ray, Intercept, and Flag indicating whether intercept exists.
:rtype: tuple
"""
handle = ctypes.c_int(handle)
vertex = stypes.toDoubleVector(vertex)
raydir = stypes.toDoubleVector(raydir)
plid = ctypes.c_int()
xpt = stypes.emptyDoubleVector(3)
found = ctypes.c_int()
libspice.dskx02_c(handle, ctypes.byref(dladsc), vertex, raydir, ctypes.byref(plid), xpt, ctypes.byref(found))
return plid.value, stypes.cVectorToPython(xpt), bool(found.value)
|
Determine the plate ID and body-fixed coordinates of the
intersection of a specified ray with the surface defined by a
type 2 DSK plate model.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskx02_c.html
:param handle: Handle of DSK kernel containing plate model.
:type handle: int
:param dladsc: DLA descriptor of plate model segment.
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:param vertex: Ray's vertex in the body fixed frame.
:type vertex: 3-Element Array of floats
:param raydir: Ray direction in the body fixed frame.
:type raydir: 3-Element Array of floats
:return: ID code of the plate intersected by the ray, Intercept, and Flag indicating whether intercept exists.
:rtype: tuple
|
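A hedged usage sketch following spiceypy conventions (the kernel path is a placeholder and the DLA descriptor lookup is shown schematically):
import spiceypy as spice  # assumption: standard spiceypy import

handle = spice.dasopr('phobos.bds')  # hypothetical type 2 DSK file
dladsc = spice.dlabfs(handle)        # first DLA segment descriptor
vertex = [0.0, 0.0, 100.0]           # ray start in body-fixed km
raydir = [0.0, 0.0, -1.0]            # pointing toward the body
plid, xpt, found = spice.dskx02(handle, dladsc, vertex, raydir)
if found:
    print('ray hit plate', plid, 'at', xpt)
spice.dascls(handle)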
def get_layer_params(self, layer_name):
"""
Provides access to the parameters of the given layer.
Works around the non-availability of graph collections in
eager mode.
:layer_name: name of the layer for which parameters are
required; must be one of the strings in the
list layer_names
:return: list of parameters corresponding to the given
layer.
"""
assert layer_name in self.layer_names
out = []
layer = self.layers[layer_name]
layer_variables = layer.variables
# For each parameter in a layer.
for param in layer_variables:
if param not in out:
out.append(param)
return out
|
Provides access to the parameters of the given layer.
Works around the non-availability of graph collections in
eager mode.
:layer_name: name of the layer for which parameters are
required; must be one of the strings in the
list layer_names
:return: list of parameters corresponding to the given
layer.
|
def Collect(
self, knowledge_base, artifact_definition, searcher, file_system):
"""Collects values using a file artifact definition.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
artifact_definition (artifacts.ArtifactDefinition): artifact definition.
searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
the file system.
file_system (dfvfs.FileSystem): file system to be preprocessed.
Raises:
PreProcessFail: if the preprocessing fails.
"""
for source in artifact_definition.sources:
if source.type_indicator not in (
artifact_definitions.TYPE_INDICATOR_FILE,
artifact_definitions.TYPE_INDICATOR_PATH):
continue
for path in source.paths:
# Make sure the path separators used in the artifact definition
# correspond to those used by the file system.
path_segments = path.split(source.separator)
find_spec = file_system_searcher.FindSpec(
location_glob=path_segments[1:], case_sensitive=False)
for path_specification in searcher.Find(find_specs=[find_spec]):
self._ParsePathSpecification(
knowledge_base, searcher, file_system, path_specification,
source.separator)
|
Collects values using a file artifact definition.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
artifact_definition (artifacts.ArtifactDefinition): artifact definition.
searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
the file system.
file_system (dfvfs.FileSystem): file system to be preprocessed.
Raises:
PreProcessFail: if the preprocessing fails.
|
def collect(self, order_ref):
"""Collects the result of a sign or auth order using the
``orderRef`` as reference.
RP should keep on calling collect every two seconds as long as status
indicates pending. RP must abort if status indicates failed. The user
identity is returned when complete.
Example collect results returned while authentication or signing is
still pending:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"status":"pending",
"hintCode":"userSign"
}
Example collect result when authentication or signing has failed:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"status":"failed",
"hintCode":"userCancel"
}
Example collect result when authentication or signing is successful
and completed:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"status":"complete",
"completionData": {
"user": {
"personalNumber":"190000000000",
"name":"Karl Karlsson",
"givenName":"Karl",
"surname":"Karlsson"
},
"device": {
"ipAddress":"192.168.0.1"
},
"cert": {
"notBefore":"1502983274000",
"notAfter":"1563549674000"
},
"signature":"<base64-encoded data>",
"ocspResponse":"<base64-encoded data>"
}
}
See `BankID Relying Party Guidelines Version: 3.0 <https://www.bankid.com/assets/bankid/rp/bankid-relying-party-guidelines-v3.0.pdf>`_
for more details about how to inform end user of the current status,
whether it is pending, failed or completed.
:param order_ref: The ``orderRef`` UUID returned from auth or sign.
:type order_ref: str
:return: The CollectResponse parsed to a dictionary.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
"""
response = self.client.post(
self._collect_endpoint, json={"orderRef": order_ref}
)
if response.status_code == 200:
return response.json()
else:
raise get_json_error_class(response)
|
Collects the result of a sign or auth order using the
``orderRef`` as reference.
RP should keep on calling collect every two seconds as long as status
indicates pending. RP must abort if status indicates failed. The user
identity is returned when complete.
Example collect results returned while authentication or signing is
still pending:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"status":"pending",
"hintCode":"userSign"
}
Example collect result when authentication or signing has failed:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"status":"failed",
"hintCode":"userCancel"
}
Example collect result when authentication or signing is successful
and completed:
.. code-block:: json
{
"orderRef":"131daac9-16c6-4618-beb0-365768f37288",
"status":"complete",
"completionData": {
"user": {
"personalNumber":"190000000000",
"name":"Karl Karlsson",
"givenName":"Karl",
"surname":"Karlsson"
},
"device": {
"ipAddress":"192.168.0.1"
},
"cert": {
"notBefore":"1502983274000",
"notAfter":"1563549674000"
},
"signature":"<base64-encoded data>",
"ocspResponse":"<base64-encoded data>"
}
}
See `BankID Relying Party Guidelines Version: 3.0 <https://www.bankid.com/assets/bankid/rp/bankid-relying-party-guidelines-v3.0.pdf>`_
for more details about how to inform end user of the current status,
whether it is pending, failed or completed.
:param order_ref: The ``orderRef`` UUID returned from auth or sign.
:type order_ref: str
:return: The CollectResponse parsed to a dictionary.
:rtype: dict
:raises BankIDError: raises a subclass of this error
when error has been returned from server.
|
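A hedged polling sketch built on ``collect`` above (client construction is elided; hintCode handling is simplified):
import time

def wait_for_completion(client, order_ref, interval=2.0, timeout=120.0):
    # Poll collect() every two seconds, as the guidelines recommend.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = client.collect(order_ref)
        if result['status'] == 'complete':
            return result['completionData']
        if result['status'] == 'failed':
            raise RuntimeError(result.get('hintCode', 'failed'))
        time.sleep(interval)
    raise TimeoutError('BankID order did not complete in time')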
def winsorize(self,
min_percentile,
max_percentile,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately
to each group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with columns, defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
"""
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
)
|
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately
to each group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with columns, defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
|
def fit(self, X, y, **fit_params):
"""See ``NeuralNet.fit``.
In contrast to ``NeuralNet.fit``, ``y`` is non-optional to
avoid mistakenly forgetting about ``y``. However, ``y`` can be
set to ``None`` in case it is derived dynamically from
``X``.
"""
# pylint: disable=useless-super-delegation
# this is actually a pylint bug:
# https://github.com/PyCQA/pylint/issues/1085
return super().fit(X, y, **fit_params)
|
See ``NeuralNet.fit``.
In contrast to ``NeuralNet.fit``, ``y`` is non-optional to
avoid mistakenly forgetting about ``y``. However, ``y`` can be
set to ``None`` in case it is derived dynamically from
``X``.
|
def add_step(self, setting, duration):
"""
Adds a step to a program.
:param setting: Current, Wattage or Resistance, depending on program mode.
:param duration: Length of step in seconds.
:return: None
"""
if len(self._prog_steps) < 10:
self._prog_steps.append(ProgramStep(self, setting, duration))
else:
raise IndexError("Maximum of 10 steps are allowed")
|
Adds a step to a program.
:param setting: Current, Wattage or Resistance, depending on program mode.
:param duration: Length of step in seconds.
:return: None
|
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
"""Generate a DownloadJob that actually triggers a file download."""
pattern = NgdConfig.get_fileending(filetype)
filename, expected_checksum = get_name_and_checksum(checksums, pattern)
base_url = convert_ftp_url(entry['ftp_path'])
full_url = '{}/{}'.format(base_url, filename)
local_file = os.path.join(directory, filename)
full_symlink = None
if symlink_path is not None:
full_symlink = os.path.join(symlink_path, filename)
# Keep metadata around
mtable = metadata.get()
mtable.add(entry, local_file)
return DownloadJob(full_url, local_file, expected_checksum, full_symlink)
|
Generate a DownloadJob that actually triggers a file download.
|
def asint(vari):
"""
Convert dtype of polynomial coefficients to int.
Example:
>>> poly = 1.5*cp.variable()+2.25
>>> print(poly)
1.5q0+2.25
>>> print(cp.asint(poly))
q0+2
"""
if isinstance(vari, Poly):
core = vari.A.copy()
for key in vari.keys:
core[key] = numpy.asarray(core[key], dtype=int)
return Poly(core, vari.dim, vari.shape, int)
return numpy.asarray(vari, dtype=int)
|
Convert dtype of polynomial coefficients to int.
Example:
>>> poly = 1.5*cp.variable()+2.25
>>> print(poly)
1.5q0+2.25
>>> print(cp.asint(poly))
q0+2
|
def workspaces_provider(context):
"""
create a vocab of all workspaces in this site
"""
catalog = api.portal.get_tool(name="portal_catalog")
workspaces = catalog(portal_type="ploneintranet.workspace.workspacefolder")
current = api.content.get_uuid(context)
terms = []
for ws in workspaces:
if current != ws["UID"]:
terms.append(SimpleVocabulary.createTerm(
ws["UID"], ws["UID"], ws["Title"]))
return SimpleVocabulary(terms)
|
create a vocab of all workspaces in this site
|
def get_fields(model_class, field_name='', path=''):
""" Get fields and meta data from a model
:param model_class: A django model class
:param field_name: The field name to get sub fields from
:param path: path of our field in format
field_name__second_field_name__etc__
:returns: Returns fields and meta data about such fields
fields: Django model fields
app_label: the app label of the model the fields belong to
path: Our new path
:rtype: dict
"""
fields = get_direct_fields_from_model(model_class)
app_label = model_class._meta.app_label
if field_name != '':
field, model, direct, m2m = _get_field_by_name(model_class, field_name)
path += field_name
path += '__'
if direct: # Direct field
try:
new_model = _get_remote_field(field).parent_model
except AttributeError:
new_model = _get_remote_field(field).model
else: # Indirect related field
new_model = field.related_model
fields = get_direct_fields_from_model(new_model)
app_label = new_model._meta.app_label
return {
'fields': fields,
'path': path,
'app_label': app_label,
}
|
Get fields and meta data from a model
:param model_class: A django model class
:param field_name: The field name to get sub fields from
:param path: path of our field in format
field_name__second_field_name__etc__
:returns: Returns fields and meta data about such fields
fields: Django model fields
app_label: the app label of the model the fields belong to
path: Our new path
:rtype: dict
|
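A hedged usage sketch for ``get_fields`` with a hypothetical Django model (``Book`` with an ``author`` foreign key; assumes the helpers above are importable):
# Direct fields of the hypothetical Book model; path stays empty.
result = get_fields(Book)
print(result['path'], result['app_label'])

# Follow the author FK one hop; path becomes 'author__'.
nested = get_fields(Book, field_name='author')
print(nested['path'], [f.name for f in nested['fields']])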
def write_results(self, data, name=None):
"""
Write JSON to file with the specified name.
:param name: Path to the file to be written to. If no path is passed
a new JSON file "results.json" will be created in the
current working directory.
:param data: the JSON-serializable object to write.
"""
if name:
filepath = os.path.abspath(name)
else:
filepath = os.path.join(os.getcwd(), "results.json")
with open(filepath, "w", encoding="utf8") as f:
try:
f.write(unicode(json.dumps(data, indent=4)))
except NameError:
f.write(json.dumps(data, indent=4))
|
Write JSON to file with the specified name.
:param name: Path to the file to be written to. If no path is passed
a new JSON file "results.json" will be created in the
current working directory.
:param data: the JSON-serializable object to write.
|
def get_modules(modulename=None):
"""Return a list of modules and packages under modulename.
If modulename is not given, return a list of all top level modules
and packages.
"""
modulename = compat.ensure_not_unicode(modulename)
if not modulename:
try:
return ([modname for (importer, modname, ispkg)
in iter_modules()
if not modname.startswith("_")] +
list(sys.builtin_module_names))
except OSError:
# Bug in Python 2.6, see #275
return list(sys.builtin_module_names)
try:
module = safeimport(modulename)
except ErrorDuringImport:
return []
if module is None:
return []
if hasattr(module, "__path__"):
return [modname for (importer, modname, ispkg)
in iter_modules(module.__path__)
if not modname.startswith("_")]
return []
|
Return a list of modules and packages under modulename.
If modulename is not given, return a list of all top level modules
and packages.
|
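Usage of ``get_modules`` is straightforward; the outputs below are illustrative:
print(get_modules())         # all top-level modules and packages
print(get_modules('email'))  # submodules, e.g. ['charset', 'encoders', ...]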
def _bapp(self, sample, target_label, target_image):
"""
Main algorithm for Boundary Attack ++.
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sample: input image. Without the batchsize dimension.
:param target_label: integer for targeted attack,
None for nontargeted attack. Without the batchsize dimension.
:param target_image: an array with the same size as sample, or None.
Without the batchsize dimension.
Output:
perturbed image.
"""
# Original label required for untargeted attack.
if target_label is None:
original_label = np.argmax(
self.sess.run(self.logits, feed_dict={self.input_ph: sample[None]})
)
else:
target_label = np.argmax(target_label)
def decision_function(images):
"""
Decision function output 1 on the desired side of the boundary,
0 otherwise.
"""
images = clip_image(images, self.clip_min, self.clip_max)
prob = []
for i in range(0, len(images), self.batch_size):
batch = images[i:i+self.batch_size]
prob_i = self.sess.run(self.logits, feed_dict={self.input_ph: batch})
prob.append(prob_i)
prob = np.concatenate(prob, axis=0)
if target_label is None:
return np.argmax(prob, axis=1) != original_label
else:
return np.argmax(prob, axis=1) == target_label
# Initialize.
if target_image is None:
perturbed = initialize(decision_function, sample, self.shape,
self.clip_min, self.clip_max)
else:
perturbed = target_image
# Project the initialization to the boundary.
perturbed, dist_post_update = binary_search_batch(sample,
np.expand_dims(perturbed, 0),
decision_function,
self.shape,
self.constraint,
self.theta)
dist = compute_distance(perturbed, sample, self.constraint)
for j in np.arange(self.num_iterations):
current_iteration = j + 1
# Choose delta.
delta = select_delta(dist_post_update, current_iteration,
self.clip_max, self.clip_min, self.d,
self.theta, self.constraint)
# Choose number of evaluations.
num_evals = int(min([self.initial_num_evals * np.sqrt(j+1),
self.max_num_evals]))
# approximate gradient.
gradf = approximate_gradient(decision_function, perturbed, num_evals,
delta, self.constraint, self.shape,
self.clip_min, self.clip_max)
if self.constraint == 'linf':
update = np.sign(gradf)
else:
update = gradf
# search step size.
if self.stepsize_search == 'geometric_progression':
# find step size.
epsilon = geometric_progression_for_stepsize(perturbed,
update, dist, decision_function, current_iteration)
# Update the sample.
perturbed = clip_image(perturbed + epsilon * update,
self.clip_min, self.clip_max)
# Binary search to return to the boundary.
perturbed, dist_post_update = binary_search_batch(sample,
perturbed[None],
decision_function,
self.shape,
self.constraint,
self.theta)
elif self.stepsize_search == 'grid_search':
# Grid search for stepsize.
epsilons = np.logspace(-4, 0, num=20, endpoint=True) * dist
epsilons_shape = [20] + len(self.shape) * [1]
perturbeds = perturbed + epsilons.reshape(epsilons_shape) * update
perturbeds = clip_image(perturbeds, self.clip_min, self.clip_max)
idx_perturbed = decision_function(perturbeds)
if np.sum(idx_perturbed) > 0:
# Select the perturbation that yields the minimum distance after binary search.
perturbed, dist_post_update = binary_search_batch(sample,
perturbeds[idx_perturbed],
decision_function,
self.shape,
self.constraint,
self.theta)
# compute new distance.
dist = compute_distance(perturbed, sample, self.constraint)
if self.verbose:
print('iteration: {:d}, {:s} distance {:.4E}'.format(
j+1, self.constraint, dist))
perturbed = np.expand_dims(perturbed, 0)
return perturbed
|
Main algorithm for Boundary Attack ++.
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param sample: input image. Without the batchsize dimension.
:param target_label: integer for targeted attack,
None for nontargeted attack. Without the batchsize dimension.
:param target_image: an array with the same size as sample, or None.
Without the batchsize dimension.
Output:
perturbed image.
|
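A minimal self-contained sketch of the projection-to-boundary idea used above: bisect between the clean sample and an adversarial point until the decision flips (a 1-D stand-in for ``binary_search_batch``):
import numpy as np

def bisect_to_boundary(decision, x_clean, x_adv, tol=1e-6):
    # decision(x) is True on the adversarial side of the boundary.
    lo, hi = x_clean, x_adv  # hi must already be adversarial
    while np.max(np.abs(hi - lo)) > tol:
        mid = (lo + hi) / 2.0
        if decision(mid):
            hi = mid  # tighten the adversarial endpoint
        else:
            lo = mid
    return hi

# Toy boundary at x = 1.5: anything above counts as adversarial.
print(bisect_to_boundary(lambda x: x > 1.5, np.float64(0.0), np.float64(3.0)))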
def export(local_root, commit, target):
"""Export git commit to directory. "Extracts" all files at the commit to the target directory.
Set mtime of RST files to last commit date.
:raise CalledProcessError: Unhandled git command failure.
:param str local_root: Local path to git root directory.
:param str commit: Git commit SHA to export.
:param str target: Directory to export to.
"""
log = logging.getLogger(__name__)
target = os.path.realpath(target)
mtimes = list()
# Define extract function.
def extract(stdout):
"""Extract tar archive from "git archive" stdout.
:param file stdout: Handle to git's stdout pipe.
"""
queued_links = list()
try:
with tarfile.open(fileobj=stdout, mode='r|') as tar:
for info in tar:
log.debug('name: %s; mode: %d; size: %s; type: %s', info.name, info.mode, info.size, info.type)
path = os.path.realpath(os.path.join(target, info.name))
if not path.startswith(target): # Handle bad paths.
log.warning('Ignoring tar object path %s outside of target directory.', info.name)
elif info.isdir(): # Handle directories.
if not os.path.exists(path):
os.makedirs(path, mode=info.mode)
elif info.issym() or info.islnk(): # Queue links.
queued_links.append(info)
else: # Handle files.
tar.extract(member=info, path=target)
if os.path.splitext(info.name)[1].lower() == '.rst':
mtimes.append(info.name)
for info in (i for i in queued_links if os.path.exists(os.path.join(target, i.linkname))):
tar.extract(member=info, path=target)
except tarfile.TarError as exc:
log.debug('Failed to extract output from "git archive" command: %s', str(exc))
# Run command.
run_command(local_root, ['git', 'archive', '--format=tar', commit], pipeto=extract)
# Set mtime.
for file_path in mtimes:
last_committed = int(run_command(local_root, ['git', 'log', '-n1', '--format=%at', commit, '--', file_path]))
os.utime(os.path.join(target, file_path), (last_committed, last_committed))
|
Export git commit to directory. "Extracts" all files at the commit to the target directory.
Set mtime of RST files to last commit date.
:raise CalledProcessError: Unhandled git command failure.
:param str local_root: Local path to git root directory.
:param str commit: Git commit SHA to export.
:param str target: Directory to export to.
|
def as_task(self, logger=None, **fields):
"""
Start a new L{eliot.Action} of this type as a task (i.e. top-level
action) with the given start fields.
See L{ActionType.__call__} for example of usage.
@param logger: A L{eliot.ILogger} provider to which the action's
messages will be written, or C{None} to use the default one.
@param fields: Extra fields to add to the message.
@rtype: L{eliot.Action}
"""
return self._startTask(
logger, self.action_type, self._serializers, **fields)
|
Start a new L{eliot.Action} of this type as a task (i.e. top-level
action) with the given start fields.
See L{ActionType.__call__} for example of usage.
@param logger: A L{eliot.ILogger} provider to which the action's
messages will be written, or C{None} to use the default one.
@param fields: Extra fields to add to the message.
@rtype: L{eliot.Action}
|
def _process_config(self):
"""Traverses the config and adds master keys and regional clients as needed."""
self._user_agent_adding_config = botocore.config.Config(user_agent_extra=USER_AGENT_SUFFIX)
if self.config.region_names:
self.add_regional_clients_from_list(self.config.region_names)
self.default_region = self.config.region_names[0]
else:
self.default_region = self.config.botocore_session.get_config_variable("region")
if self.default_region is not None:
self.add_regional_client(self.default_region)
if self.config.key_ids:
self.add_master_keys_from_list(self.config.key_ids)
|
Traverses the config and adds master keys and regional clients as needed.
|
def generate_data(nitem, nfeat=2, dim=10, labeldim=1, base='item'):
"""Returns a randomly generated h5f.Data instance.
- nitem is the number of items to generate.
- nfeat is the number of features to generate for each item.
- dim is the dimension of the features vectors.
- base is the items basename
- labeldim is the dimension of the labels vectors.
"""
import numpy as np
# A list of item names
items = [base + '_' + str(i) for i in range(nitem)]
# A list of features arrays
features = [np.random.randn(nfeat, dim) for _ in range(nitem)]
# A list of 1D or 2D label arrays
if labeldim == 1:
labels = [np.linspace(0, 1, nfeat)] * nitem
else:
t = np.linspace(0, 1, nfeat)
labels = [np.array([t+i for i in range(labeldim)])] * nitem
# Format data as required by the writer
return h5f.Data(items, labels, features, check=True)
|
Returns a randomly generated h5f.Data instance.
- nitem is the number of items to generate.
- nfeat is the number of features to generate for each item.
- dim is the dimension of the features vectors.
- base is the items basename
- labeldim is the dimension of the labels vectors.
|
def emit(self, span_datas):
"""
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
"""
try:
# TODO: keep the stream alive.
# The stream is terminated after iteration completes.
# To keep it alive, we can enqueue proto spans here
# and asynchronously read them and send to the agent.
responses = self.client.Export(
self.generate_span_requests(span_datas))
# read response
for _ in responses:
pass
except grpc.RpcError:
pass
|
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
|
def apply(self, X, ntree_limit=0):
"""Return the predicted leaf every tree for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
"""
test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs)
return self.get_booster().predict(test_dmatrix,
pred_leaf=True,
ntree_limit=ntree_limit)
|
Return the predicted leaf of every tree for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
|
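A hedged usage sketch with the scikit-learn style wrapper (synthetic data):
import numpy as np
from xgboost import XGBClassifier  # assumption: sklearn-compatible wrapper

X = np.random.rand(100, 4)
y = (X[:, 0] > 0.5).astype(int)
clf = XGBClassifier(n_estimators=10, max_depth=3).fit(X, y)
leaves = clf.apply(X)  # shape (100, 10): one leaf index per tree
print(leaves.shape, leaves[0])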
def get_public_ip(addresses, version=4):
"""Return either the devices public IPv4 or IPv6 address.
"""
for addr in addresses:
if addr['public'] and addr['address_family'] == version:
return addr.get('address')
return None
|
Return either the device's public IPv4 or IPv6 address.
|
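A self-contained check of ``get_public_ip`` with a made-up address list (documentation IP ranges):
addresses = [
    {'public': False, 'address_family': 4, 'address': '10.0.0.2'},
    {'public': True, 'address_family': 4, 'address': '203.0.113.5'},
    {'public': True, 'address_family': 6, 'address': '2001:db8::1'},
]
print(get_public_ip(addresses))             # '203.0.113.5'
print(get_public_ip(addresses, version=6))  # '2001:db8::1'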
def _lmder1_freudenstein_roth():
"""Freudenstein and Roth function (lmder1 test #7)"""
def func(params, vec):
vec[0] = -13 + params[0] + ((5 - params[1]) * params[1] - 2) * params[1]
vec[1] = -29 + params[0] + ((1 + params[1]) * params[1] - 14) * params[1]
def jac(params, jac):
jac[0] = 1
jac[1,0] = params[1] * (10 - 3 * params[1]) - 2
jac[1,1] = params[1] * (2 + 3 * params[1]) - 14
guess = np.asfarray([0.5, -2])
_lmder1_driver(2, func, jac, guess,
0.200124960962e+02, 0.699887517585e+01,
[0.114124844655e+02, -0.896827913732e+00])
_lmder1_driver(2, func, jac, guess * 10,
0.124328339489e+05, 0.699887517449e+01,
[0.114130046615e+02, -0.896796038686e+00])
_lmder1_driver(2, func, jac, guess * 100,
0.11426454595762e+08, 0.699887517243e+01,
[0.114127817858e+02, -0.896805107492e+00])
|
Freudenstein and Roth function (lmder1 test #7)
|
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
|
Saves the name of the visible toolbars in the .ini file.
|
def tick(self, index, length):
"""
Add tick marks in order of axes by width
APIPARAM: chxtc <axis index>,<length of tick mark>
"""
assert int(length) <= 25, 'Length cannot be more than 25'
self.data['ticks'].append('%s,%d'%(index,length))
return self.parent
|
Add tick marks in order of axes by width
APIPARAM: chxtc <axis index>,<length of tick mark>
|
def dcc(self, *args, **kwargs):
"""Create and associate a new DCCConnection object.
Use the returned object to listen for or connect to
a DCC peer.
"""
dcc = self.reactor.dcc(*args, **kwargs)
self.dcc_connections.append(dcc)
return dcc
|
Create and associate a new DCCConnection object.
Use the returned object to listen for or connect to
a DCC peer.
|
def reset(self):
"""
Reset the videostream by restarting ffmpeg
"""
if self.ffmpeg_process is not None:
# Close the previous stream
try:
self.ffmpeg_process.send_signal(signal.SIGINT)
except OSError:
pass
command = []
command.extend([
self.ffmpeg_binary,
'-loglevel', 'verbose',
'-y', # overwrite previous file/stream
# '-re', # native frame-rate
'-analyzeduration', '1',
'-f', 'rawvideo',
'-r', '%d' % self.fps, # set a fixed frame rate
'-vcodec', 'rawvideo',
# size of one frame
'-s', '%dx%d' % (self.width, self.height),
'-pix_fmt', 'rgb24', # The input are raw bytes
'-thread_queue_size', '1024',
'-i', '/tmp/videopipe', # The input comes from a pipe
# Twitch needs to receive sound in their streams!
# '-an', # Tells FFMPEG not to expect any audio
])
if self.audio_enabled:
command.extend([
'-ar', '%d' % AUDIORATE,
'-ac', '2',
'-f', 's16le',
'-thread_queue_size', '1024',
'-i', '/tmp/audiopipe'
])
else:
command.extend([
'-ar', '8000',
'-ac', '1',
'-f', 's16le',
'-i', '/dev/zero', # silence alternative, works forever
# '-i','http://stream1.radiostyle.ru:8001/tunguska',
# '-filter_complex',
# '[0:1][1:0]amix=inputs=2:duration=first[all_audio]'
])
command.extend([
# VIDEO CODEC PARAMETERS
'-vcodec', 'libx264',
'-r', '%d' % self.fps,
'-b:v', '3000k',
'-s', '%dx%d' % (self.width, self.height),
'-preset', 'faster', '-tune', 'zerolatency',
'-crf', '23',
'-pix_fmt', 'yuv420p',
# '-force_key_frames', r'expr:gte(t,n_forced*2)',
'-minrate', '3000k', '-maxrate', '3000k',
'-bufsize', '12000k',
'-g', '60', # key frame distance
'-keyint_min', '1',
# '-filter:v "setpts=0.25*PTS"'
# '-vsync','passthrough',
# AUDIO CODEC PARAMETERS
'-acodec', 'libmp3lame', '-ar', '44100', '-b:a', '160k',
# '-bufsize', '8192k',
'-ac', '1',
# '-acodec', 'aac', '-strict', 'experimental',
# '-ab', '128k', '-ar', '44100', '-ac', '1',
# '-async','44100',
# '-filter_complex', 'asplit', #for audio sync?
# STORE THE VIDEO PARAMETERS
# '-vcodec', 'libx264', '-s', '%dx%d'%(width, height),
# '-preset', 'libx264-fast',
# 'my_output_videofile2.avi'
# MAP THE STREAMS
# use only video from first input and only audio from second
'-map', '0:v', '-map', '1:a',
# NUMBER OF THREADS
'-threads', '2',
# STREAM TO TWITCH
'-f', 'flv', 'rtmp://live-ams.twitch.tv/app/%s' %
self.twitch_stream_key
])
devnullpipe = open("/dev/null", "w") # Throw away stream
if self.verbose:
devnullpipe = None
self.ffmpeg_process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stderr=devnullpipe,
stdout=devnullpipe)
|
Reset the videostream by restarting ffmpeg
|
def com_google_fonts_check_has_ttfautohint_params(ttFont):
""" Font has ttfautohint params? """
from fontbakery.utils import get_name_entry_strings
def ttfautohint_version(value):
# example string:
# 'Version 1.000; ttfautohint (v0.93) -l 8 -r 50 -G 200 -x 14 -w "G"'
import re
results = re.search(r'ttfautohint \(v(.*)\) ([^;]*)', value)
if results:
return results.group(1), results.group(2)
version_strings = get_name_entry_strings(ttFont, NameID.VERSION_STRING)
failed = True
for vstring in version_strings:
values = ttfautohint_version(vstring)
if values:
ttfa_version, params = values
if params:
yield PASS, f"Font has ttfautohint params ({params})"
failed = False
else:
yield SKIP, "Font appears to our heuristic as not hinted using ttfautohint."
failed = False
if failed:
yield FAIL, "Font is lacking ttfautohint params on its version strings on the name table."
|
Font has ttfautohint params?
|
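The version-string heuristic above is self-contained enough to test directly:
import re

def ttfautohint_version(value):
    # Matches e.g. 'Version 1.000; ttfautohint (v0.93) -l 8 -r 50'
    results = re.search(r'ttfautohint \(v(.*)\) ([^;]*)', value)
    if results:
        return results.group(1), results.group(2)

print(ttfautohint_version('Version 1.000; ttfautohint (v0.93) -l 8 -r 50'))
# -> ('0.93', '-l 8 -r 50')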
def lock(self):
"""Lock specified (abstract) requirements into (concrete) candidates.
The locking procedure consists of four stages:
* Resolve versions and dependency graph (powered by ResolveLib).
* Walk the graph to determine "why" each candidate came to be, i.e.
what top-level requirements result in a given candidate.
* Populate hashes for resolved candidates.
* Populate markers based on dependency specifications of each
candidate, and the dependency graph.
"""
provider = self.get_provider()
reporter = self.get_reporter()
resolver = resolvelib.Resolver(provider, reporter)
with vistir.cd(self.project.root):
state = resolver.resolve(self.requirements)
traces = trace_graph(state.graph)
hash_cache = HashCache()
for r in state.mapping.values():
if not r.hashes:
r.hashes = get_hashes(hash_cache, r)
set_metadata(
state.mapping, traces,
provider.fetched_dependencies,
provider.collected_requires_pythons,
)
lockfile = plette.Lockfile.with_meta_from(self.project.pipfile)
lockfile["default"] = _collect_derived_entries(
state, traces, self.default_requirements,
)
lockfile["develop"] = _collect_derived_entries(
state, traces, self.develop_requirements,
)
self.project.lockfile = lockfile
|
Lock specified (abstract) requirements into (concrete) candidates.
The locking procedure consists of four stages:
* Resolve versions and dependency graph (powered by ResolveLib).
* Walk the graph to determine "why" each candidate came to be, i.e.
what top-level requirements result in a given candidate.
* Populate hashes for resolved candidates.
* Populate markers based on dependency specifications of each
candidate, and the dependency graph.
|
def from_kwargs(cls, **kwargs):
"""Initialise configuration from kwargs."""
config = cls()
for slot in cls.__slots__:
if slot.startswith('_'):
slot = slot[1:]
setattr(config, slot, kwargs.pop(slot, cls.get_default(slot)))
if kwargs:
raise ValueError("Unrecognized option(s): {}".format(kwargs.keys()))
return config
|
Initialise configuration from kwargs.
|
def fullselection(self) -> selectiontools.Selection:
"""A |Selection| object containing all |Element| and |Node| objects
defined by |XMLInterface.selections| and |XMLInterface.devices|.
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> from hydpy import HydPy, TestIO, XMLInterface
>>> hp = HydPy('LahnH')
>>> with TestIO():
... hp.prepare_network()
... interface = XMLInterface('single_run.xml')
>>> interface.find('selections').text = 'nonheadwaters'
>>> interface.fullselection
Selection("fullselection",
nodes=("dill", "lahn_2", "lahn_3"),
elements=("land_dill", "land_lahn_1", "land_lahn_2",
"land_lahn_3"))
"""
fullselection = selectiontools.Selection('fullselection')
for selection in self.selections:
fullselection += selection
fullselection += self.devices
return fullselection
|
A |Selection| object containing all |Element| and |Node| objects
defined by |XMLInterface.selections| and |XMLInterface.devices|.
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> from hydpy import HydPy, TestIO, XMLInterface
>>> hp = HydPy('LahnH')
>>> with TestIO():
... hp.prepare_network()
... interface = XMLInterface('single_run.xml')
>>> interface.find('selections').text = 'nonheadwaters'
>>> interface.fullselection
Selection("fullselection",
nodes=("dill", "lahn_2", "lahn_3"),
elements=("land_dill", "land_lahn_1", "land_lahn_2",
"land_lahn_3"))
|
def install_timers(config, context):
"""Create the timers as specified by the plugin configuration."""
timers = []
if config.get('capture_timeout_warnings'):
timeout_threshold = config.get('timeout_warning_threshold')
# Schedule the warning at the user-specified threshold, given as a
# fraction of the remaining time (e.g. 0.50 of a 30 s budget = 15 s).
# Schedule the error half a second before the actual timeout happens.
time_remaining = context.get_remaining_time_in_millis() / 1000
timers.append(Timer(time_remaining * timeout_threshold, timeout_warning, (config, context)))
timers.append(Timer(max(time_remaining - .5, 0), timeout_error, [config]))
if config.get('capture_memory_warnings'):
# Schedule the memory watch dog interval. Warning will re-schedule itself if necessary.
timers.append(Timer(.5, memory_warning, (config, context)))
for t in timers:
t.start()
return timers
|
Create the timers as specified by the plugin configuration.
|
def get_checksum(file):
"""
Get SHA256 hash from the contents of a given file
"""
with open(file, 'rb') as FH:
contents = FH.read()
return hashlib.sha256(contents).hexdigest()
|
Get SHA256 hash from the contents of a given file
|
def synonym(name):
"""
Utility function mimicking the behavior of the old SA synonym function
with the new hybrid property semantics.
"""
return hybrid_property(lambda inst: getattr(inst, name),
lambda inst, value: setattr(inst, name, value),
expr=lambda cls: getattr(cls, name))
|
Utility function mimicking the behavior of the old SA synonym function
with the new hybrid property semantics.
|
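A minimal usage sketch, assuming SQLAlchemy 1.4+; the User model and its _email column are hypothetical, chosen only to illustrate synonym():
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base
Base = declarative_base()
class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    _email = Column('email', String)
    # instance reads/writes and class-level expressions all proxy to _email
    email = synonym('_email')
u = User(id=1)
u.email = 'a@example.com'   # the setter lambda delegates to _email
assert u._email == 'a@example.com'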
def fontsize(self, fontsize=None):
'''
Set or return size of current font.
:param fontsize: Size of font.
:return: Size of font (if fontsize was not specified)
'''
if fontsize is not None:
self._canvas.fontsize = fontsize
else:
return self._canvas.fontsize
|
Set or return size of current font.
:param fontsize: Size of font.
:return: Size of font (if fontsize was not specified)
|
def source(self):
"""
Returns the single source name for a variant collection if it is unique,
otherwise raises an error.
"""
if len(self.sources) == 0:
raise ValueError("No source associated with %s" % self.__class__.__name__)
elif len(self.sources) > 1:
raise ValueError("Multiple sources for %s" % self.__class__.__name__)
return list(self.sources)[0]
|
Returns the single source name for a variant collection if it is unique,
otherwise raises an error.
|
def _set_line_speed(self, v, load=False):
"""
Setter method for line_speed, mapped from YANG variable /interface/management/line_speed (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_line_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_line_speed() directly.
YANG Description: The line-speed characteristics for this management
interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=line_speed.line_speed, is_container='container', presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The line-speed characteristics for this management \ninterface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """line_speed must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=line_speed.line_speed, is_container='container', presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The line-speed characteristics for this management \ninterface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__line_speed = t
if hasattr(self, '_set'):
self._set()
|
Setter method for line_speed, mapped from YANG variable /interface/management/line_speed (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_line_speed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_line_speed() directly.
YANG Description: The line-speed characteristics for this management
interface.
|
def _init_metadata(self):
"""stub"""
super(MultiLanguageMultipleChoiceQuestionFormRecord, self)._init_metadata()
self._choices_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'choices'),
'element_label': 'choices',
'instructions': 'Enter as many text choices as you wish',
'required': True,
'read_only': False,
'linked': False,
'array': True,
'default_object_values': [[]],
'syntax': 'OBJECT',
'object_set': []
}
|
stub
|
def user_agent(self):
"""Return the formatted user agent string."""
components = ["/".join(x) for x in self.user_agent_components.items()]
return " ".join(components)
|
Return the formatted user agent string.
|
def createLinkToSelf(self, new_zone, callback=None, errback=None,
**kwargs):
"""
Create a new linked zone, linking to ourselves. All records in this
zone will then be available as "linked records" in the new zone.
:param str new_zone: the new zone name to link to this one
:return: new Zone
"""
zone = Zone(self.config, new_zone)
kwargs['link'] = self.data['zone']
return zone.create(callback=callback, errback=errback, **kwargs)
|
Create a new linked zone, linking to ourselves. All records in this
zone will then be available as "linked records" in the new zone.
:param str new_zone: the new zone name to link to this one
:return: new Zone
|
def feature_subset(self, indices):
""" Returns some subset of the features.
Parameters
----------
indices : :obj:`list` of :obj:`int`
indices of the features in the list
Returns
-------
:obj:`list` of :obj:`Feature`
"""
if isinstance(indices, np.ndarray):
indices = indices.tolist()
if not isinstance(indices, list):
raise ValueError('Can only index with lists')
return [self.features_[i] for i in indices]
|
Returns some subset of the features.
Parameters
----------
indices : :obj:`list` of :obj:`int`
indices of the features in the list
Returns
-------
:obj:`list` of :obj:`Feature`
|
def write_surf_params_to_file(self):
"""Write the params to file that surftool_Free needs to generate the surface facets"""
inp_file = self.water_surface_file + '_params.txt'
lg.info('Writing Inputs to file : ' + inp_file)
if self.surf_state == 'flat': # this is the only one that currently works.
lg.info('Surface Type is :: flat')
f = open(inp_file, 'w')
f.write('verbose= ' + str(self.verbose) + '\n')
f.write('band_count= ' + str(self.num_bands) + '\n')
f.write('band_centres_data= ')
f.write(",".join([str(wave) for wave in self.wavelengths]) + '\n')
f.write('partition= ' + self.partition + '\n')
f.write('vn= ' + str(self.vn) + '\n')
f.write('hn= ' + str(self.hn) + '\n')
f.write('theta_points= ')
f.write(",".join([str(theta) for theta in self.theta_points]) + '\n')
f.write('type= ' + self.iface_type + '\n')
f.write('refrac_index_0= ' + str(self.iface_0_ri) + '\n')
f.write('refrac_index_1= ' + str(self.iface_1_ri) + '\n')
f.write('wind_speed= ' + str(self.wind_speed) + '\n')
f.write('wind_direc= ' + str(self.wind_direc) + '\n')
f.write('crosswind_vertices= ' + str(self.crosswind_vertices) + '\n')
f.write('upwind_vertices= ' + str(self.upwind_vertices) + '\n')
f.write('surface_size= ' + str(self.surface_size) + '\n')
f.write('surface_radius=' + str(self.surface_radius) + '\n')
f.write('target_size= ' + str(self.target_size) + '\n')
f.write('rays_per_quad= ' + str(self.rays_per_quad) + '\n')
f.write('surface_count= ' + str(self.surface_count) + '\n')
f.write('azimuthally_average= ' + str(self.azimuthally_average) + '\n')
f.write('surface_save_fp= ' + inp_file.strip('_params.txt') + '\n')
f.flush()
f.close()
|
Write the params to file that surftool_Free needs to generate the surface facets
|
def restore(self):
"""
Unloads all modules that weren't loaded when save_modules was called.
"""
sys = set(self._sys_modules.keys())
for mod_name in sys.difference(self._saved_modules):
del self._sys_modules[mod_name]
|
Unloads all modules that weren't loaded when save_modules was called.
|
def get_info_consistent(self, ndim):
"""
Returns the main meta-data information adapted to the supplied
image dimensionality.
It will try to resolve inconsistencies and other conflicts,
altering the information available in the most plausible way.
Parameters
----------
ndim : int
image's dimensionality
Returns
-------
spacing : tuple of floats
offset : tuple of floats
direction : ndarray
"""
if ndim > len(self.spacing):
spacing = self.spacing + (1.0, ) * (ndim - len(self.spacing))
else:
spacing = self.spacing[:ndim]
if ndim > len(self.offset):
offset = self.offset + (0.0, ) * (ndim - len(self.offset))
else:
offset = self.offset[:ndim]
if ndim > self.direction.shape[0]:
direction = np.identity(ndim)
direction[:self.direction.shape[0], :self.direction.shape[0]] = self.direction
else:
direction = self.direction[:ndim, :ndim]
return spacing, offset, direction
|
Returns the main meta-data information adapted to the supplied
image dimensionality.
It will try to resolve inconsistencies and other conflicts,
altering the information available in the most plausible way.
Parameters
----------
ndim : int
image's dimensionality
Returns
-------
spacing : tuple of floats
offset : tuple of floats
direction : ndarray
|
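A hypothetical sketch of the padding behaviour: a holder with 2D metadata queried for a 3D image gets unit spacing, zero offset, and an identity direction block appended.
import numpy as np
class Header(object):
    spacing = (1.0, 2.0)
    offset = (0.0, 5.0)
    direction = np.identity(2)
    get_info_consistent = get_info_consistent
spacing, offset, direction = Header().get_info_consistent(3)
print(spacing)          # (1.0, 2.0, 1.0)
print(offset)           # (0.0, 5.0, 0.0)
print(direction.shape)  # (3, 3), original 2x2 block embedded in an identity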
def col_to_dt(df,col_name,set_format = None,infer_format = True, dest = False):
""" Coerces a column in a DataFrame to datetime
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
set_format - string, default None
Explicit datetime format passed to pandas.to_datetime
infer_format - bool, default True
Whether to let pandas infer the datetime format
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
"""
new_col = _pd.to_datetime(df[col_name],errors = 'coerce',
format = set_format,infer_datetime_format = infer_format)
if dest:
set_col(df,col_name,new_col)
else:
return new_col
|
Coerces a column in a DataFrame to datetime
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
set_format - string, default None
Explicit datetime format passed to pandas.to_datetime
infer_format - bool, default True
Whether to let pandas infer the datetime format
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
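A usage sketch, assuming a pandas version that still accepts infer_datetime_format; the column name is a placeholder.
import pandas as pd
df = pd.DataFrame({'when': ['2021-01-01', '2021-02-15', 'not a date']})
df['when_dt'] = col_to_dt(df, 'when')  # unparseable values become NaT
print(df['when_dt'].dtype)             # datetime64[ns]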
def _store(self, uid, content, data=None):
"""Store the given dict of content at uid. Nothing returned."""
doc = dict(uid=uid)
if data:
gfs = gridfs.GridFS(self.db)
id = gfs.put(data, encoding='utf-8')
doc.update(data_id=id)
doc.update(content)
self.db.pastes.insert_one(doc)
|
Store the given dict of content at uid. Nothing returned.
|
def assignOrderNames(self):
"""
Assigns the order names for this tree based on the name of the
columns.
"""
try:
schema = self.tableType().schema()
except AttributeError:
return
for colname in self.columns():
column = schema.column(colname)
if column:
self.setColumnOrderName(colname, column.name())
|
Assigns the order names for this tree based on the name of the
columns.
|
def _get_instance_key(self, host, namespace, wmi_class, other=None):
"""
Return an index key for a given instance. Useful for caching.
"""
if other:
return "{host}:{namespace}:{wmi_class}-{other}".format(
host=host, namespace=namespace, wmi_class=wmi_class, other=other
)
return "{host}:{namespace}:{wmi_class}".format(host=host, namespace=namespace, wmi_class=wmi_class)
|
Return an index key for a given instance. Useful for caching.
|
def kernel_id(self):
"""Get kernel id"""
if self.connection_file is not None:
json_file = osp.basename(self.connection_file)
return json_file.split('.json')[0]
|
Get kernel id
|
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data["properties"]
address_pools = []
for content in properties.get("loadBalancerBackendAddressPools", []):
resource = Resource.from_raw_data(content)
address_pools.append(resource)
properties["loadBalancerBackendAddressPools"] = address_pools
nat_rules = []
for content in properties.get("loadBalancerInboundNatRules", None):
resource = Resource.from_raw_data(content)
nat_rules.append(resource)
properties["loadBalancerInboundNatRules"] = nat_rules
raw_content = properties.get("publicIPAddress", None)
if raw_content is not None:
resource = Resource.from_raw_data(raw_content)
properties["publicIPAddress"] = resource
raw_content = properties.get("serviceInsertion", None)
if raw_content is not None:
resource = Resource.from_raw_data(raw_content)
properties["serviceInsertion"] = resource
raw_content = properties.get("subnet", None)
if raw_content is not None:
resource = Resource.from_raw_data(raw_content)
properties["subnet"] = resource
return super(IPConfiguration, cls).process_raw_data(raw_data)
|
Create a new model using raw API response.
|
def parser_set(self, args):
"""Set config from an :py:class:`argparse.Namespace` object.
Call this method with the return value from
:py:meth:`~argparse.ArgumentParser.parse_args`.
:param argparse.Namespace args: The populated
:py:class:`argparse.Namespace` object.
"""
for key, value in vars(args).items():
self._parser_update(key, value)
|
Set config from an :py:class:`argparse.Namespace` object.
Call this method with the return value from
:py:meth:`~argparse.ArgumentParser.parse_args`.
:param argparse.Namespace args: The populated
:py:class:`argparse.Namespace` object.
|
def update_tcs_table(self):
"""
Periodically update a table of info from the TCS.
Only works at GTC
"""
g = get_root(self).globals
if not g.cpars['tcs_on'] or not g.cpars['telins_name'].lower() == 'gtc':
self.after(60000, self.update_tcs_table)
return
try:
tel_server = tcs.get_telescope_server()
telpars = tel_server.getTelescopeParams()
add_gtc_header_table_row(self.tcs_table, telpars)
except Exception as err:
g.clog.warn('Could not update table of TCS info')
# schedule next call for 60s later
self.after(60000, self.update_tcs_table)
|
Periodically update a table of info from the TCS.
Only works at GTC
|
def current_timestamp(self) -> datetime:
"""Get the current state timestamp."""
timestamp = DB.get_hash_value(self._key, 'current_timestamp')
return datetime_from_isoformat(timestamp)
|
Get the current state timestamp.
|
def RV_2(self):
"""Instantaneous RV of star 2 with respect to system center-of-mass
"""
return -self.orbpop_long.RV * (self.orbpop_long.M1 /
(self.orbpop_long.M1 + self.orbpop_long.M2)) +\
self.orbpop_short.RV_com1
|
Instantaneous RV of star 2 with respect to system center-of-mass
|
def check_obfuscated_ip (self):
"""Warn if host of this URL is obfuscated IP address."""
# check if self.host can be an IP address
# check for obfuscated IP address
if iputil.is_obfuscated_ip(self.host):
ips = iputil.resolve_host(self.host)
if ips:
self.host = ips[0]
self.add_warning(
_("URL %(url)s has obfuscated IP address %(ip)s") % \
{"url": self.base_url, "ip": ips[0]},
tag=WARN_URL_OBFUSCATED_IP)
|
Warn if host of this URL is obfuscated IP address.
|
def save_setting(self, setting_name, value):
"""Saves the setting value into the database."""
setting = self.get_setting(setting_name)
if setting is None:
setting = models.DashboardWidgetSettings.objects.create(
widget_name=self.get_name(),
setting_name=setting_name,
value=value)
setting.value = value
setting.save()
return setting
|
Saves the setting value into the database.
|
def visibility(cls, orb, **kwargs):
"""Visibility from a topocentric frame
see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>`
for description of arguments handling.
Args:
orb (Orbit): Orbit to compute visibility from the station with
Keyword Args:
start (Date): starting date of the visibility search
stop (Date or datetime.timedelta): end of the visibility search
step (datetime.timedelta): step of the computation
events (bool, Listener or list): If evaluate to True, compute
AOS, LOS and MAX elevation for each pass on this station.
If 'events' is a Listener or an iterable of Listeners, they
will be added to the computation
Any other keyword arguments are passed to the propagator.
Yield:
Orbit: In-visibility point of the orbit. This Orbit is already
in the frame of the station and in spherical form.
"""
from ..orbits.listeners import stations_listeners, Listener
listeners = kwargs.setdefault('listeners', [])
events = kwargs.pop('events', None)
event_classes = tuple()
if events:
# Handling of the listeners passed in the 'events' kwarg
# and merging them with the `listeners` kwarg
if isinstance(events, Listener):
listeners.append(events)
elif isinstance(events, (list, tuple)):
listeners.extend(events)
sta_list = stations_listeners(cls)
listeners.extend(sta_list)
# Only the events present in the `event_classes` list will be yielded
# outside of visibility. This list was created in order to force
# the yield of AOS and LOS.
event_classes = tuple(listener.event for listener in sta_list)
for point in orb.iter(**kwargs):
point.frame = cls
point.form = 'spherical'
# Not very clean !
if point.phi < 0 and not isinstance(point.event, event_classes):
continue
yield point
|
Visibility from a topocentric frame
see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>`
for description of arguments handling.
Args:
orb (Orbit): Orbit to compute visibility from the station with
Keyword Args:
start (Date): starting date of the visibility search
stop (Date or datetime.timedelta): end of the visibility search
step (datetime.timedelta): step of the computation
events (bool, Listener or list): If evaluate to True, compute
AOS, LOS and MAX elevation for each pass on this station.
If 'events' is a Listener or an iterable of Listeners, they
will be added to the computation
Any other keyword arguments are passed to the propagator.
Yield:
Orbit: In-visibility point of the orbit. This Orbit is already
in the frame of the station and in spherical form.
|
def tmpdir(prefix='npythy_tempdir_', delete=True):
'''
tmpdir() creates a temporary directory and returns its path. At python exit, the directory and
all of its contents are recursively deleted (so long as the normal python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
'''
path = tempfile.mkdtemp(prefix=prefix)
if not os.path.isdir(path): raise ValueError('Could not find or create temp directory')
if delete: atexit.register(shutil.rmtree, path)
return path
|
tmpdir() creates a temporary directory and returns its path. At python exit, the directory and
all of its contents are recursively deleted (so long as the normal python exit process is
allowed to call the atexit handlers).
tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call.
The option delete may be set to False to specify that the tempdir should not be deleted on exit.
|
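A short usage sketch; the prefix and file name are placeholders.
import os
path = tmpdir(prefix='scratch_')
with open(os.path.join(path, 'notes.txt'), 'w') as f:
    f.write('temporary data')
# the directory and its contents are removed when the interpreter exits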
def List(validator):
"""
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
"""
@wraps(List)
def built(value):
if not hasattr(value, '__iter__'):
raise Error("Must be a list")
invalid = Invalid()
for i, item in enumerate(value):
try:
value[i] = validator(item)
except Invalid as e:
for error in e:
error.path.insert(0, i)
invalid.append(error)
except Error as e:
e.path.insert(0, i)
invalid.append(e)
if len(invalid):
raise invalid
return value
return built
|
Creates a validator that runs the given validator on every item in a list
or other collection. The validator can mutate the values.
Any raised errors will be collected into a single ``Invalid`` error. Their
paths will be replaced with the index of the item. Will raise an error if
the input value is not iterable.
|
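An illustrative sketch; as_int is a hypothetical item validator built on the module's Error class, and the input list is mutated in place.
def as_int(value):
    try:
        return int(value)
    except (TypeError, ValueError):
        raise Error("Must be an integer")
validate = List(as_int)
print(validate(['1', '2', '3']))  # -> [1, 2, 3]
# validate(['1', 'x']) would raise Invalid with the error path set to [1]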
def register_actions(self, shortcut_manager):
"""Register callback methods for triggered actions
:param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings
between shortcuts and actions.
"""
shortcut_manager.add_callback_for_action("add", partial(self._add_new_state, state_type=StateType.EXECUTION))
shortcut_manager.add_callback_for_action("add_execution_state", partial(self._add_new_state,
state_type=StateType.EXECUTION))
shortcut_manager.add_callback_for_action("add_hierarchy_state", partial(self._add_new_state,
state_type=StateType.HIERARCHY))
shortcut_manager.add_callback_for_action("add_barrier_state", partial(self._add_new_state,
state_type=StateType.BARRIER_CONCURRENCY))
shortcut_manager.add_callback_for_action("add_preemptive_state", partial(self._add_new_state,
state_type=StateType.PREEMPTION_CONCURRENCY))
shortcut_manager.add_callback_for_action("add_output", partial(self._add_data_port_to_selected_state,
data_port_type='OUTPUT'))
shortcut_manager.add_callback_for_action("add_input", partial(self._add_data_port_to_selected_state,
data_port_type='INPUT'))
shortcut_manager.add_callback_for_action("add_scoped_variable", self._add_scoped_variable_to_selected_state)
shortcut_manager.add_callback_for_action("add_outcome", self._add_outcome_to_selected_state)
shortcut_manager.add_callback_for_action("delete", self._remove_selected_elements)
shortcut_manager.add_callback_for_action("copy", self._copy_selection)
shortcut_manager.add_callback_for_action("paste", self._paste_clipboard)
shortcut_manager.add_callback_for_action("cut", self._cut_selection)
shortcut_manager.add_callback_for_action('show_data_flows', self.update_view)
shortcut_manager.add_callback_for_action('show_data_values', self.update_view)
shortcut_manager.add_callback_for_action('data_flow_mode', self.data_flow_mode)
shortcut_manager.add_callback_for_action('show_aborted_preempted', self.update_view)
|
Register callback methods for triggered actions
:param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings
between shortcuts and actions.
|
def read(self, size):
"""Read raw bytes from the instrument.
:param size: amount of bytes to be sent to the instrument
:type size: integer
:return: received bytes
:return type: bytes
"""
raw_read = super(USBRawDevice, self).read
received = bytearray()
while len(received) < size:
resp = raw_read(self.RECV_CHUNK)
received.extend(resp)
return bytes(received)
|
Read raw bytes from the instrument.
:param size: amount of bytes to be sent to the instrument
:type size: integer
:return: received bytes
:return type: bytes
|
def get_parameters_at_instant(self, instant):
"""
Get the parameters of the legislation at a given instant
:param instant: string of the format 'YYYY-MM-DD' or `openfisca_core.periods.Instant` instance.
:returns: The parameters of the legislation at a given instant.
:rtype: :any:`ParameterNodeAtInstant`
"""
if isinstance(instant, periods.Period):
instant = instant.start
elif isinstance(instant, (str, int)):
instant = periods.instant(instant)
else:
assert isinstance(instant, periods.Instant), "Expected an Instant (e.g. Instant((2017, 1, 1)) ). Got: {}.".format(instant)
parameters_at_instant = self._parameters_at_instant_cache.get(instant)
if parameters_at_instant is None and self.parameters is not None:
parameters_at_instant = self.parameters.get_at_instant(str(instant))
self._parameters_at_instant_cache[instant] = parameters_at_instant
return parameters_at_instant
|
Get the parameters of the legislation at a given instant
:param instant: string of the format 'YYYY-MM-DD' or `openfisca_core.periods.Instant` instance.
:returns: The parameters of the legislation at a given instant.
:rtype: :any:`ParameterNodeAtInstant`
|
def download_file_from_google_drive(driveid, filename=None, destination=os.path.curdir):
""" Download script for google drive shared links
Thank you @turdus-merula and Andrew Hundt!
https://stackoverflow.com/a/39225039/623735
"""
if '&id=' in driveid:
# https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs # dailymail_stories.tgz
driveid = driveid.split('&id=')[-1]
if '?id=' in driveid:
# 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ' # SSD pretrainined keras model
driveid = driveid.split('?id=')[-1]
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': driveid}, stream=True)
token = get_response_confirmation_token(response)
if token:
params = {'id': driveid, 'confirm': token}
response = session.get(URL, params=params, stream=True)
filename = filename or get_url_filename(driveid=driveid)
full_destination_path = save_response_content(response, filename=filename, destination=destination)
return os.path.abspath(full_destination_path) # return the saved file's path, not just the destination directory
|
Download script for google drive shared links
Thank you @turdus-merula and Andrew Hundt!
https://stackoverflow.com/a/39225039/623735
|
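A usage sketch; the share link is the one quoted in the code comment and the local file name is a placeholder.
share_link = 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ'
local_path = download_file_from_google_drive(share_link, filename='model.h5', destination='/tmp')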
def _dims_in_order(self, dimension_order):
'''
:param list dimension_order: A list of axes
:rtype: bool
:return: Returns True if the dimensions are in order U*, T, Z, Y, X,
False otherwise
'''
regx = regex.compile(r'^[^TZYX]*T?Z?Y?X?$')
dimension_string = ''.join(dimension_order)
return regx.match(dimension_string) is not None
|
:param list dimension_order: A list of axes
:rtype: bool
:return: Returns True if the dimensions are in order U*, T, Z, Y, X,
False otherwise
|
def from_str(cls, string):
"""
Creates a literal from a string
Parameters
----------
string : str
If the string starts with '!', it's interpreted as a negated variable
Returns
-------
caspo.core.literal.Literal
Created object instance
"""
if string[0] == '!':
signature = -1
variable = string[1:]
else:
signature = 1
variable = string
return cls(variable, signature)
|
Creates a literal from a string
Parameters
----------
string : str
If the string starts with '!', it's interpreted as a negated variable
Returns
-------
caspo.core.literal.Literal
Created object instance
|
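A quick usage sketch, assuming the Literal constructor stores variable and signature as attributes:
pos = Literal.from_str('raf')   # signature == 1
neg = Literal.from_str('!raf')  # signature == -1, variable == 'raf'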
def pci_lookup_name1(
access: (IN, ctypes.POINTER(pci_access)),
buf: (IN, ctypes.c_char_p),
size: (IN, ctypes.c_int),
flags: (IN, ctypes.c_int),
arg1: (IN, ctypes.c_int),
) -> ctypes.c_char_p:
"""
Conversion of PCI ID's to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with one argument.
It is required because ctypes doesn't support variadic functions.
"""
pass
|
Conversion of PCI ID's to names (according to the pci.ids file).
char *pci_lookup_name(
struct pci_access *a, char *buf, int size, int flags, ...
) PCI_ABI;
This is a variant of pci_lookup_name() that gets called with one argument.
It is required because ctypes doesn't support variadic functions.
|
def _handle_stream(self, msg):
""" Handle stdout, stderr, and stdin.
"""
self.log.debug("stream: %s", msg.get('content', ''))
if not self._hidden and self._is_from_this_session(msg):
# Most consoles treat tabs as being 8 space characters. Convert tabs
# to spaces so that output looks as expected regardless of this
# widget's tab width.
text = msg['content']['data'].expandtabs(8)
self._append_plain_text(text, before_prompt=True)
self._control.moveCursor(QtGui.QTextCursor.End)
|
Handle stdout, stderr, and stdin.
|
def get(self, *args, **kwargs):
"""
Returns a single instance matching this query, optionally with additional filter kwargs.
See :ref:`retrieving-objects-with-filters`
Returns a single object matching the QuerySet.
.. code-block:: python
user = User.get(id=1)
If no objects are matched, a :class:`~.DoesNotExist` exception is raised.
If more than one object is found, a :class:`~.MultipleObjectsReturned` exception is raised.
"""
if args or kwargs:
return self.filter(*args, **kwargs).get()
self._execute_query()
# Check that the resultset only contains one element, avoiding sending a COUNT query
try:
self[1]
raise self.model.MultipleObjectsReturned('Multiple objects found')
except IndexError:
pass
try:
obj = self[0]
except IndexError:
raise self.model.DoesNotExist
return obj
|
Returns a single instance matching this query, optionally with additional filter kwargs.
See :ref:`retrieving-objects-with-filters`
Returns a single object matching the QuerySet.
.. code-block:: python
user = User.get(id=1)
If no objects are matched, a :class:`~.DoesNotExist` exception is raised.
If more than one object is found, a :class:`~.MultipleObjectsReturned` exception is raised.
|
def CreateTypes(self, allTypes):
"""
Create pyVmomi types from vmodl.reflect.DynamicTypeManager.AllTypeInfo
"""
enumTypes, dataTypes, managedTypes = self._ConvertAllTypes(allTypes)
self._CreateAllTypes(enumTypes, dataTypes, managedTypes)
|
Create pyVmomi types from vmodl.reflect.DynamicTypeManager.AllTypeInfo
|
def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudfront.CloudFrontConnection`
:return: A connection to CloudFront
"""
from boto.cloudfront import CloudFrontConnection
return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
|
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudfront.CloudFrontConnection`
:return: A connection to CloudFront
|
def early_stopping(stopping_rounds, first_metric_only=False, verbose=True):
"""Create a callback that activates early stopping.
Note
----
Activates early stopping.
The model will train until the validation score stops improving.
Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
to continue training.
Requires at least one validation data and one metric.
If there's more than one, will check all of them. But the training data is ignored anyway.
To check only the first metric set ``first_metric_only`` to True.
Parameters
----------
stopping_rounds : int
The number of rounds without improvement after which training is stopped.
first_metric_only : bool, optional (default=False)
Whether to use only the first metric for early stopping.
verbose : bool, optional (default=True)
Whether to print message with early stopping information.
Returns
-------
callback : function
The callback that activates early stopping.
"""
best_score = []
best_iter = []
best_score_list = []
cmp_op = []
enabled = [True]
def _init(env):
enabled[0] = not any((boost_alias in env.params
and env.params[boost_alias] == 'dart') for boost_alias in ('boosting',
'boosting_type',
'boost'))
if not enabled[0]:
warnings.warn('Early stopping is not available in dart mode')
return
if not env.evaluation_result_list:
raise ValueError('For early stopping, '
'at least one dataset and eval metric is required for evaluation')
if verbose:
msg = "Training until validation scores don't improve for {} rounds."
print(msg.format(stopping_rounds))
for eval_ret in env.evaluation_result_list:
best_iter.append(0)
best_score_list.append(None)
if eval_ret[3]:
best_score.append(float('-inf'))
cmp_op.append(gt)
else:
best_score.append(float('inf'))
cmp_op.append(lt)
def _callback(env):
if not cmp_op:
_init(env)
if not enabled[0]:
return
for i in range_(len(env.evaluation_result_list)):
score = env.evaluation_result_list[i][2]
if best_score_list[i] is None or cmp_op[i](score, best_score[i]):
best_score[i] = score
best_iter[i] = env.iteration
best_score_list[i] = env.evaluation_result_list
elif env.iteration - best_iter[i] >= stopping_rounds:
if verbose:
print('Early stopping, best iteration is:\n[%d]\t%s' % (
best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
raise EarlyStopException(best_iter[i], best_score_list[i])
if env.iteration == env.end_iteration - 1:
if verbose:
print('Did not meet early stopping. Best iteration is:\n[%d]\t%s' % (
best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
raise EarlyStopException(best_iter[i], best_score_list[i])
if first_metric_only: # the only first metric is used for early stopping
break
_callback.order = 30
return _callback
|
Create a callback that activates early stopping.
Note
----
Activates early stopping.
The model will train until the validation score stops improving.
Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
to continue training.
Requires at least one validation data and one metric.
If there's more than one, will check all of them. But the training data is ignored anyway.
To check only the first metric set ``first_metric_only`` to True.
Parameters
----------
stopping_rounds : int
The number of rounds without improvement after which training is stopped.
first_metric_only : bool, optional (default=False)
Whether to use only the first metric for early stopping.
verbose : bool, optional (default=True)
Whether to print message with early stopping information.
Returns
-------
callback : function
The callback that activates early stopping.
|
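A minimal usage sketch with LightGBM's training API; the random data is a placeholder and assumes lightgbm is installed.
import lightgbm as lgb
import numpy as np
rng = np.random.default_rng(0)
X, y = rng.random((200, 5)), rng.random(200)
train = lgb.Dataset(X[:150], y[:150])
valid = lgb.Dataset(X[150:], y[150:], reference=train)
booster = lgb.train(
    {'objective': 'regression', 'metric': 'l2', 'verbosity': -1},
    train,
    num_boost_round=500,
    valid_sets=[valid],
    callbacks=[early_stopping(stopping_rounds=20)],
)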
def choose_colour(self, title="Select Colour", **kwargs):
"""
Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])}
"""
return_data = self._run_zenity(title, ["--color-selection"], kwargs)
if return_data.successful:
converted_colour = ColourData.from_zenity_tuple_str(return_data.data)
return DialogData(return_data.return_code, converted_colour)
else:
return DialogData(return_data.return_code, None)
|
Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])}
|
def _getitem(self, index):
"""Get a single non-slice index."""
row = self._records[index]
if row is not None:
pass
elif self.is_attached():
# need to handle negative indices manually
if index < 0:
index = len(self._records) + index
row = next((decode_row(line)
for i, line in self._enum_lines()
if i == index),
None)
if row is None:
raise ItsdbError('could not retrieve row in attached table')
else:
raise ItsdbError('invalid row in detached table: {}'.format(index))
return Record._make(self.fields, row, self, index)
|
Get a single non-slice index.
|
def find_disulfide_bridges(self, representative_only=True):
"""Run Biopython's disulfide bridge finder and store found bridges.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.annotations['SSBOND-biopython']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
"""
if representative_only:
if self.representative_structure:
try:
self.representative_structure.find_disulfide_bridges()
except KeyError:
log.error('{}: unable to run disulfide bridge finder on {}'.format(self.id, self.representative_structure))
else:
log.warning('{}: no representative structure set, cannot run disulfide bridge finder'.format(self.id))
else:
for s in self.structures:
try:
s.find_disulfide_bridges()
except KeyError:
log.error('{}: unable to run disulfide bridge finder on {}'.format(self.id, s.id))
|
Run Biopython's disulfide bridge finder and store found bridges.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.annotations['SSBOND-biopython']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
|
def Rect_to_wxRect(self, fr):
""" Return a zoomed wx.Rect for given fitz.Rect."""
r = (fr * self.zoom).irect # zoomed IRect
return wx.Rect(r.x0, r.y0, r.width, r.height)
|
Return a zoomed wx.Rect for given fitz.Rect.
|
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the following list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followings' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
|
Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the following list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
|
def hide_routemap_holder_route_map_content_set_ip_interface_null0(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
ip = ET.SubElement(set, "ip")
interface = ET.SubElement(ip, "interface")
null0 = ET.SubElement(interface, "null0")
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def _SmallestColSize(self, text):
"""Finds the largest indivisible word of a string.
...and thus the smallest possible column width that can contain that
word unsplit over rows.
Args:
text: A string of text potentially consisting of words.
Returns:
Integer size of the largest single word in the text.
"""
if not text:
return 0
stripped = terminal.StripAnsiText(text)
return max(len(word) for word in stripped.split())
|
Finds the largest indivisible word of a string.
...and thus the smallest possible column width that can contain that
word unsplit over rows.
Args:
text: A string of text potentially consisting of words.
Returns:
Integer size of the largest single word in the text.
|
def get_row_by_fsid(self, fs_id):
'''Check whether this task exists in the liststore; if so, return its
TreeModelRow, otherwise return None'''
for row in self.liststore:
if row[FSID_COL] == fs_id:
return row
return None
|
Check whether this task exists in the liststore; if so, return its
TreeModelRow, otherwise return None
|
def nside2pixarea(nside, degrees=False):
"""Drop-in replacement for healpy `~healpy.pixelfunc.nside2pixarea`."""
area = nside_to_pixel_area(nside)
if degrees:
return area.to(u.deg ** 2).value
else:
return area.to(u.sr).value
|
Drop-in replacement for healpy `~healpy.pixelfunc.nside2pixarea`.
|
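A quick usage sketch; relies on the astropy-healpix nside_to_pixel_area helper imported by the module.
area_sr = nside2pixarea(64)                  # pixel area in steradians
area_deg2 = nside2pixarea(64, degrees=True)  # the same area in square degrees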
def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None,
ignored_terms=None, **parameters):
"""Scale and sample from the given binary polynomial.
If scalar is not given, problem is scaled based on bias and polynomial
ranges. See :meth:`.BinaryPolynomial.scale` and
:meth:`.BinaryPolynomial.normalize`
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
scalar (number, optional):
Value by which to scale the energy range of the binary polynomial.
bias_range (number/pair, optional, default=1):
Value/range by which to normalize all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
**parameters:
Other parameters for the sampling method, specified by
the child sampler.
"""
if ignored_terms is None:
ignored_terms = set()
else:
ignored_terms = {frozenset(term) for term in ignored_terms}
# scale and normalize happen in-place so we need to make a copy
original, poly = poly, poly.copy()
if scalar is not None:
poly.scale(scalar, ignored_terms=ignored_terms)
else:
poly.normalize(bias_range=bias_range, poly_range=poly_range,
ignored_terms=ignored_terms)
# we need to know how much we scaled by, which we can do by looking
# at the biases
try:
v = next(v for v, bias in original.items()
if bias and v not in ignored_terms)
except StopIteration:
# nothing to scale
scalar = 1
else:
scalar = poly[v] / original[v]
sampleset = self.child.sample_poly(poly, **parameters)
if ignored_terms:
# we need to recalculate the energy
sampleset.record.energy = original.energies((sampleset.record.sample,
sampleset.variables))
else:
sampleset.record.energy /= scalar
return sampleset
|
Scale and sample from the given binary polynomial.
If scalar is not given, problem is scaled based on bias and polynomial
ranges. See :meth:`.BinaryPolynomial.scale` and
:meth:`.BinaryPolynomial.normalize`
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
scalar (number, optional):
Value by which to scale the energy range of the binary polynomial.
bias_range (number/pair, optional, default=1):
Value/range by which to normalize all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
**parameters:
Other parameters for the sampling method, specified by
the child sampler.
|
def timeout(timeout):
"""
A decorator to timeout a function. Decorated method calls are executed in a separate new thread
with a specified timeout.
Also check if a thread for the same function already exists before creating a new one.
Note: Compatible with Windows (thread based).
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)
if key in _thread_by_func:
# A thread for the same function already exists.
worker = _thread_by_func[key]
else:
worker = ThreadMethod(func, args, kwargs)
_thread_by_func[key] = worker
worker.join(timeout)
if worker.is_alive():
raise TimeoutException()
del _thread_by_func[key]
if worker.exception:
raise worker.exception
else:
return worker.result
return wrapper
return decorator
|
A decorator to timeout a function. Decorated method calls are executed in a separate new thread
with a specified timeout.
Also check if a thread for the same function already exists before creating a new one.
Note: Compatible with Windows (thread based).
|
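A usage sketch: the decorated call runs in a worker thread and raises TimeoutException when it outlives the limit.
import time
@timeout(2)
def slow_task():
    time.sleep(5)
    return 'done'
try:
    slow_task()
except TimeoutException:
    print('timed out after 2 seconds')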
def save(self):
"""
Deletes the selected files from storage
"""
storage = get_media_storage()
for storage_name in self.cleaned_data['selected_files']:
full_path = storage.path(storage_name)
try:
storage.delete(storage_name)
self.success_files.append(full_path)
except OSError:
self.error_files.append(full_path)
|
Deletes the selected files from storage
|
def get_var(self, name, user=None):
"""
Retrieve a global or user variable
:param name: The name of the variable to retrieve
:type name: str
:param user: If retrieving a user variable, the user identifier
:type user: str or None
:rtype: str
:raises UserNotDefinedError: The specified user does not exist
:raises VarNotDefinedError: The requested variable has not been defined
"""
# Retrieve a user variable
if user is not None:
if user not in self._users:
raise UserNotDefinedError
return self._users[user].get_var(name)
# Retrieve a global variable
if name not in self._global_vars:
raise VarNotDefinedError
return self._global_vars[name]
|
Retrieve a global or user variable
:param name: The name of the variable to retrieve
:type name: str
:param user: If retrieving a user variable, the user identifier
:type user: str or None
:rtype: str
:raises UserNotDefinedError: The specified user does not exist
:raises VarNotDefinedError: The requested variable has not been defined
|