code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def firsts(properties):
    """
    Transform a dictionary of {name: [(elt, value)+]} (resulting from
    get_properties) to a dictionary of {name: value} where each name is
    mapped to the value of its first encountered (elt, value) couple.

    :param dict properties: properties to reduce to their first values.
    :return: dictionary of first parameter values by name.
    :rtype: dict
    """
    # For every name keep only the value of the first (elt, value) couple.
    return {name: elt_properties[0][1]
            for name, elt_properties in properties.items()}
|
Transform a dictionary of {name: [(elt, value)+]} (resulting from
get_properties) to a dictionary of {name, value} where names are first
encountered in input properties.
:param dict properties: properties to firsts.
:return: dictionary of parameter values by names.
:rtype: dict
|
def position_pnl(self):
    """
    [float] P&L generated on the current trading day that is attributable
    to the position carried over from the previous day (yesterday's
    position).
    """
    mark = self._data_proxy.get_last_price(self._order_book_id)
    # Long positions gain when the price rises; shorts gain when it falls.
    is_long = self._direction == POSITION_DIRECTION.LONG
    spread = (mark - self._last_price) if is_long else (self._last_price - mark)
    return self._logical_old_quantity * self._contract_multiplier * spread
|
[float] 昨仓盈亏,策略在当前交易日产生的盈亏中来源于昨仓的部分
|
def check_apps_permission(self, apps):
    """Return True if at least one of ``apps`` is listed in ``apps_dict``.

    Since ``apps_dict`` is derived from the app_list given by django
    admin, it lists only the apps the user can view.

    :param apps: iterable of app labels to check.
    :return: True if any app is visible to the user, False otherwise.
    """
    # Short-circuiting membership test replaces the manual loop.
    return any(app in self.apps_dict for app in apps)
|
Checks if one of apps is listed in apps_dict
Since apps_dict is derived from the app_list
given by django admin, it lists only the apps
the user can view
|
def _png(code, version, file, scale=1, module_color=(0, 0, 0, 255),
         background=(255, 255, 255, 255), quiet_zone=4, debug=False):
    """See: pyqrcode.QRCode.png()

    This function was abstracted away from QRCode to allow for the output of
    QR codes during the build process, i.e. for debugging. It works
    just the same except you must specify the code's version. This is needed
    to calculate the PNG's size.

    This method will write the given file out as a PNG file. Note, it
    depends on the PyPNG module to do this.

    :param code: matrix of QR modules to render (rows of 0/1 bits).
    :param version: QR code version, used to compute the PNG's size.
    :param file: file path or writable file-like object for the output.
    :param scale: number of pixels drawn per module (default: ``1``).
    :param module_color: Color of the QR code (default: ``(0, 0, 0, 255)`` (black))
    :param background: Optional background color. If set to ``None`` the PNG
                       will have a transparent background.
                       (default: ``(255, 255, 255, 255)`` (white))
    :param quiet_zone: Border around the QR code (also known as quiet zone)
                       (default: ``4``). Set to zero (``0``) if the code shouldn't
                       have a border.
    :param debug: Indicates if errors in the QR code should be added (as red
                  modules) to the output (default: ``False``).
    """
    import png
    # Coerce scale parameter into an integer
    try:
        scale = int(scale)
    except ValueError:
        raise ValueError('The scale parameter must be an integer')
    def scale_code(size):
        """To perform the scaling we need to inflate the number of bits.
        The PNG library expects all of the bits when it draws the PNG.
        Effectively, we double, triple, etc. the number of columns and
        the number of rows.
        """
        # This is one row's worth of each possible module
        # PNG's use 0 for black and 1 for white, this is the
        # reverse of the QR standard
        black = [0] * scale
        white = [1] * scale
        # Tuple to lookup colors
        # The 3rd color is the module_color unless "debug" is enabled
        colors = (white, black, (([2] * scale) if debug else black))
        # Whitespace added on the left and right side
        border_module = white * quiet_zone
        # This is the row to show up at the top and bottom border
        border_row = [[1] * size] * scale * quiet_zone
        # This will hold the final PNG's bits
        bits = []
        # Add scale rows before the code as a border,
        # as per the standard
        bits.extend(border_row)
        # Add each row of the code to the final PNG bits
        for row in code:
            tmp_row = []
            # Add one all white module to the beginning
            # to create the vertical border
            tmp_row.extend(border_module)
            # Go through each bit in the code
            for bit in row:
                # Use the standard color or the "debug" color
                tmp_row.extend(colors[(bit if bit in (0, 1) else 2)])
            # Add one all white module to the end
            # to create the vertical border
            tmp_row.extend(border_module)
            # Copy each row scale times
            for n in range(scale):
                bits.append(tmp_row)
        # Add the bottom border
        bits.extend(border_row)
        return bits
    def png_pallete_color(color):
        """This creates a palette color from a list or tuple. The list or
        tuple must be of length 3 (for rgb) or 4 (for rgba). The values
        must be between 0 and 255. Note rgb colors will be given an added
        alpha component set to 255.
        The palette color is represented as a list, this is what is returned.
        """
        if color is None:
            return ()
        if not isinstance(color, (tuple, list)):
            # Presumably a hex color string; convert it to RGB components.
            r, g, b = _hex_to_rgb(color)
            return r, g, b, 255
        rgba = []
        if not (3 <= len(color) <= 4):
            raise ValueError('Colors must be a list or tuple of length '
                             ' 3 or 4. You passed in "{0}".'.format(color))
        for c in color:
            c = int(c)
            if 0 <= c <= 255:
                rgba.append(int(c))
            else:
                raise ValueError('Color components must be between 0 and 255')
        # Make all colors have an alpha channel
        if len(rgba) == 3:
            rgba.append(255)
        return tuple(rgba)
    if module_color is None:
        raise ValueError('The module_color must not be None')
    bitdepth = 1
    # foreground aka module color
    fg_col = png_pallete_color(module_color)
    transparent = background is None
    # If background color is set to None, the inverse color of the
    # foreground color is calculated
    bg_col = png_pallete_color(background) if background is not None else tuple([255 - c for c in fg_col])
    # Assume greyscale if module color is black and background color is white
    greyscale = fg_col[:3] == (0, 0, 0) and (not debug and transparent or bg_col == (255, 255, 255, 255))
    transparent_color = 1 if transparent and greyscale else None
    palette = [fg_col, bg_col] if not greyscale else None
    if debug:
        # Add "red" as color for error modules; two palette entries no
        # longer fit in 1 bit, so widen the bit depth.
        palette.append((255, 0, 0, 255))
        bitdepth = 2
    # The size of the PNG
    size = _get_png_size(version, scale, quiet_zone)
    # We need to increase the size of the code to match up to the
    # scale parameter.
    code_rows = scale_code(size)
    # Write out the PNG
    f, autoclose = _get_writable(file, 'wb')
    w = png.Writer(width=size, height=size, greyscale=greyscale,
                   transparent=transparent_color, palette=palette,
                   bitdepth=bitdepth)
    try:
        w.write(f, code_rows)
    finally:
        # Only close the stream if _get_writable opened it for us.
        if autoclose:
            f.close()
|
See: pyqrcode.QRCode.png()
This function was abstracted away from QRCode to allow for the output of
QR codes during the build process, i.e. for debugging. It works
just the same except you must specify the code's version. This is needed
to calculate the PNG's size.
This method will write the given file out as a PNG file. Note, it
depends on the PyPNG module to do this.
:param module_color: Color of the QR code (default: ``(0, 0, 0, 255)`` (black))
:param background: Optional background color. If set to ``None`` the PNG
will have a transparent background.
(default: ``(255, 255, 255, 255)`` (white))
:param quiet_zone: Border around the QR code (also known as quiet zone)
(default: ``4``). Set to zero (``0``) if the code shouldn't
have a border.
:param debug: Inidicates if errors in the QR code should be added (as red
modules) to the output (default: ``False``).
|
def authenticate(self, request):
    """
    Returns two-tuple of (user, token) if authentication succeeds,
    or None otherwise.

    Returns None when the request does not attempt OAuth at all;
    raises AuthenticationFailed for malformed or invalid OAuth attempts.
    """
    try:
        oauth_request = oauth_provider.utils.get_oauth_request(request)
    except oauth.Error as err:
        raise exceptions.AuthenticationFailed(err.message)
    if not oauth_request:
        return None
    oauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMES
    # Distinguish "no OAuth attempted" (no params at all) from
    # "attempted but incomplete" (some params present, some missing).
    found = any(param for param in oauth_params if param in oauth_request)
    missing = list(param for param in oauth_params if param not in oauth_request)
    if not found:
        # OAuth authentication was not attempted.
        return None
    if missing:
        # OAuth was attempted but missing parameters.
        msg = 'Missing parameters: %s' % (', '.join(missing))
        raise exceptions.AuthenticationFailed(msg)
    # Reject requests whose nonce fails the replay check.
    if not self.check_nonce(request, oauth_request):
        msg = 'Nonce check failed'
        raise exceptions.AuthenticationFailed(msg)
    # Resolve and vet the consumer making the request.
    try:
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)
    except oauth_provider.store.InvalidConsumerError:
        msg = 'Invalid consumer token: %s' % oauth_request.get_parameter('oauth_consumer_key')
        raise exceptions.AuthenticationFailed(msg)
    if consumer.status != oauth_provider.consts.ACCEPTED:
        msg = 'Invalid consumer key status: %s' % consumer.get_status_display()
        raise exceptions.AuthenticationFailed(msg)
    # Resolve the access token tied to this consumer.
    try:
        token_param = oauth_request.get_parameter('oauth_token')
        token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)
    except oauth_provider.store.InvalidTokenError:
        msg = 'Invalid access token: %s' % oauth_request.get_parameter('oauth_token')
        raise exceptions.AuthenticationFailed(msg)
    # Delegate final token validation (presumably signature verification)
    # to validate_token; oauth errors become authentication failures.
    try:
        self.validate_token(request, consumer, token)
    except oauth.Error as err:
        raise exceptions.AuthenticationFailed(err.message)
    user = token.user
    if not user.is_active:
        msg = 'User inactive or deleted: %s' % user.username
        raise exceptions.AuthenticationFailed(msg)
    return (token.user, token)
|
Returns two-tuple of (user, token) if authentication succeeds,
or None otherwise.
|
def stonith_show(stonith_id, extra_args=None, cibfile=None):
    '''
    Show the value of a cluster stonith

    stonith_id
        name for the stonith resource
    extra_args
        additional options for the pcs stonith command
    cibfile
        use cibfile instead of the live CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.stonith_show stonith_id='eps_fence' cibfile='/tmp/2_node_cluster.cib'
    '''
    # Thin wrapper: delegate to the generic item_show helper with the
    # item type fixed to 'stonith'.
    return item_show(item='stonith', item_id=stonith_id, extra_args=extra_args, cibfile=cibfile)
|
Show the value of a cluster stonith
stonith_id
name for the stonith resource
extra_args
additional options for the pcs stonith command
cibfile
use cibfile instead of the live CIB
CLI Example:
.. code-block:: bash
salt '*' pcs.stonith_show stonith_id='eps_fence' cibfile='/tmp/2_node_cluster.cib'
|
def as_view(cls, action_map=None, **initkwargs):
    """
    Build a view callable that routes HTTP methods to viewset actions
    according to the given ``action_map`` kwarg.
    """
    # Re-implements the parent behaviour; a missing/empty action_map
    # would yield a view with no handlers, so fail fast.
    if not action_map:
        raise TypeError("action_map is a required argument.")

    def view(request):
        instance = cls(**initkwargs)
        instance.request = request
        instance.lookup_url_kwargs = request.matchdict
        instance.action_map = action_map
        instance.action = action_map.get(request.method.lower())
        # Expose each configured action under its HTTP-method name so
        # dispatch() can find it.
        for http_method, action_name in action_map.items():
            setattr(instance, http_method, getattr(instance, action_name))
        return instance.dispatch(request, **request.matchdict)

    return view
|
Allows custom request to method routing based on given ``action_map`` kwarg.
|
def find_lexer_class_by_name(_alias):
    """Look up a lexer class by one of its aliases.

    Like `get_lexer_by_name`, but does not instantiate the class.

    .. versionadded:: 2.2
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)
    wanted = _alias.lower()
    # First search the builtin lexer table.
    for module_name, name, aliases, _, _ in itervalues(LEXERS):
        if wanted in aliases:
            # Lazily import the module providing this lexer on first use.
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # Then fall back to lexers registered via setuptools entrypoints.
    for plugin_cls in find_plugin_lexers():
        if wanted in plugin_cls.aliases:
            return plugin_cls
    raise ClassNotFound('no lexer for alias %r found' % _alias)
|
Lookup a lexer class by alias.
Like `get_lexer_by_name`, but does not instantiate the class.
.. versionadded:: 2.2
|
async def release_lease_async(self, lease):
    """
    Give up a lease currently held by this host. If the lease has been stolen, or expired,
    releasing it is unnecessary, and will fail if attempted.

    :param lease: The stored lease to be released.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    :return: `True` if the lease was released successfully, `False` if not.
    :rtype: bool
    """
    lease_id = None
    try:
        _logger.info("Releasing lease %r %r", self.host.guid, lease.partition_id)
        lease_id = lease.token
        # Persist a copy of the lease with ownership fields cleared before
        # releasing the underlying blob lease.
        released_copy = AzureBlobLease()
        released_copy.with_lease(lease)
        released_copy.token = None
        released_copy.owner = None
        released_copy.state = None
        # The storage client calls are blocking, so run them in the
        # executor to avoid stalling the event loop.
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.create_blob_from_text,
                self.lease_container_name,
                lease.partition_id,
                json.dumps(released_copy.serializable()),
                lease_id=lease_id))
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.release_blob_lease,
                self.lease_container_name,
                lease.partition_id,
                lease_id))
    except Exception as err:  # pylint: disable=broad-except
        # Failure to release is non-fatal (the lease may have been stolen
        # or expired); report via the return value instead of raising.
        _logger.error("Failed to release lease %r %r %r",
                      err, lease.partition_id, lease_id)
        return False
    return True
|
Give up a lease currently held by this host. If the lease has been stolen, or expired,
releasing it is unnecessary, and will fail if attempted.
:param lease: The stored lease to be released.
:type lease: ~azure.eventprocessorhost.lease.Lease
:return: `True` if the lease was released successfully, `False` if not.
:rtype: bool
|
def get_customs_properties_by_inheritance(self, obj):
    """
    Get custom properties from the templates defined in this object

    Recursively merges the custom properties of all templates of ``obj``
    into ``obj.customs`` (template values only fill in properties the
    object does not define itself), then resolves any "plus" (+) values.

    :param obj: the object to search the property
    :type obj: alignak.objects.item.Item
    :return: merged custom properties (``obj.customs``, mutated in place)
    :rtype: dict
    """
    for t_id in obj.templates:
        template = self.templates[t_id]
        # Depth-first: resolve the template's own inheritance chain first.
        tpl_cv = self.get_customs_properties_by_inheritance(template)
        if tpl_cv:
            for prop in tpl_cv:
                # The object's own value, if any, wins over the template's.
                if prop not in obj.customs:
                    value = tpl_cv[prop]
                else:
                    value = obj.customs[prop]
                if obj.has_plus(prop):
                    # '+' values are prepended to the inherited value.
                    value.insert(0, obj.get_plus_and_delete(prop))
                    # value = self.get_plus_and_delete(prop) + ',' + value
                obj.customs[prop] = value
    # Resolve any remaining '+' markers on the object's own properties.
    for prop in obj.customs:
        value = obj.customs[prop]
        if obj.has_plus(prop):
            value.insert(0, obj.get_plus_and_delete(prop))
            obj.customs[prop] = value
    # We can get custom properties in plus, we need to get all
    # entires and put
    # them into customs
    cust_in_plus = obj.get_all_plus_and_delete()
    for prop in cust_in_plus:
        obj.customs[prop] = cust_in_plus[prop]
    return obj.customs
|
Get custom properties from the templates defined in this object
:param obj: the oject to search the property
:type obj: alignak.objects.item.Item
:return: list of custom properties
:rtype: list
|
def key_bytes(self):
    """Return the raw signing key serialized as unencrypted PKCS#8 DER.

    :rtype: bytes
    """
    # Serialize without encryption so the raw key material is returned.
    no_encryption = serialization.NoEncryption()
    return self.key.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=no_encryption,
    )
|
Returns the raw signing key.
:rtype: bytes
|
def add(self, host=None, f_community=None, f_access=None, f_version=None):
    """
    Add an SNMP community string to a host

    :param host: t_hosts.id or t_hosts.f_ipaddr
    :param f_community: Community string to add
    :param f_access: READ or WRITE
    :param f_version: v1, v2c or v3
    :return: (True/False, t_snmp.id/Error string)
    """
    # Thin wrapper: forward everything to the transport layer's snmp_add.
    return self.send.snmp_add(host, f_community, f_access, f_version)
|
Add an SNMP community string to a host
:param host: t_hosts.id or t_hosts.f_ipaddr
:param f_community: Community string to add
:param f_access: READ or WRITE
:param f_version: v1, v2c or v3
:return: (True/False, t_snmp.id/Error string)
|
def cwd (self):
    """
    Change to URL parent directory. Return filename of last path
    component.
    """
    # NOTE(review): `path` is bytes after encode(), while '/' below is a
    # str literal — under Python 3 strip()/split() would raise TypeError.
    # Confirm this module targets Python 2 or adjust the literals to bytes.
    path = self.urlparts[2].encode(self.filename_encoding, 'replace')
    dirname = path.strip('/')
    dirs = dirname.split('/')
    # The last component is the file itself; the rest are directories.
    filename = dirs.pop()
    # Walk down from the connection's root one directory at a time.
    self.url_connection.cwd('/')
    for d in dirs:
        self.url_connection.cwd(d)
    return filename
|
Change to URL parent directory. Return filename of last path
component.
|
def get_func(name, argtypes=None, restype=c_int, lib=libNLPIR):
    """Retrieve an NLPIR API function exported by *lib*.

    :param str name: The name of the NLPIR function to get.
    :param list argtypes: Optional list of :mod:`ctypes` data types for
        the function's argument types.
    :param restype: A :mod:`ctypes` data type for the function's return
        type (only needed if it isn't :class:`ctypes.c_int`).
    :param lib: A :class:`ctypes.CDLL` instance for the NLPIR API library
        the function is retrieved from (defaults to :data:`libNLPIR`).
    :returns: The exported function; callable like any Python callable.
    """
    logger.debug("Getting NLPIR API function: 'name': '{}', 'argtypes': '{}',"
                 " 'restype': '{}'.".format(name, argtypes, restype))
    api_func = getattr(lib, name)
    if argtypes is not None:
        api_func.argtypes = argtypes
    # c_int is already ctypes' default return type; only set when different.
    if restype is not c_int:
        api_func.restype = restype
    logger.debug("NLPIR API function '{}' retrieved.".format(name))
    return api_func
|
Retrieves the corresponding NLPIR function.
:param str name: The name of the NLPIR function to get.
:param list argtypes: A list of :mod:`ctypes` data types that correspond
to the function's argument types.
:param restype: A :mod:`ctypes` data type that corresponds to the
function's return type (only needed if the return type isn't
:class:`ctypes.c_int`).
:param lib: A :class:`ctypes.CDLL` instance for the NLPIR API library where
the function will be retrieved from (defaults to :data:`libNLPIR`).
:returns: The exported function. It can be called like any other Python
callable.
|
def rot1(theta):
    """Rotation matrix of angle ``theta`` around the X-axis.

    Args:
        theta (float): Angle in radians
    Return:
        3x3 rotation matrix (numpy array)
    """
    c = np.cos(theta)
    s = np.sin(theta)
    # X-axis rotation: first row/column untouched, Y/Z mixed by (c, s).
    return np.array([
        [1, 0, 0],
        [0, c, s],
        [0, -s, c],
    ])
|
Args:
theta (float): Angle in radians
Return:
Rotation matrix of angle theta around the X-axis
|
def _emit(self, s):
"""Append content to the main report file."""
if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.
self._report_file.write(s)
self._report_file.flush()
|
Append content to the main report file.
|
def scalarmult_B(e):
    """
    Implements scalarmult(B, e) more efficiently.

    Processes ``e`` bit by bit, adding the precomputed point ``Bpow[i]``
    (presumably ``2**i * B`` — consistent with the per-bit loop, but the
    table is defined elsewhere) whenever bit ``i`` is set.
    """
    # scalarmult(B, l) is the identity
    e %= L
    P = IDENT
    # 253 bits suffice since e < L < 2**253 after the reduction above.
    for i in range(253):
        if e & 1:
            P = edwards_add(P=P, Q=Bpow[i])
        e //= 2
    # Sanity check: every bit of e must have been consumed.
    assert e == 0, e
    return P
|
Implements scalarmult(B, e) more efficiently.
|
def _setup(self):
"""
Prepare the system for using ``ansible-galaxy`` and returns None.
:return: None
"""
role_directory = os.path.join(self._config.scenario.directory,
self.options['roles-path'])
if not os.path.isdir(role_directory):
os.makedirs(role_directory)
|
Prepare the system for using ``ansible-galaxy`` and returns None.
:return: None
|
def bkg_calc_interp1d(self, analytes=None, kind=1, n_min=10, n_max=None, cstep=None,
                      bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
    """
    Background calculation using a 1D interpolation.

    scipy.interpolate.interp1D is used for interpolation.

    Parameters
    ----------
    analytes : str or iterable
        Which analyte or analytes to calculate.
    kind : str or int
        Integer specifying the order of the spline interpolation
        used, or string specifying a type of interpolation.
        Passed to `scipy.interpolate.interp1D`
    n_min : int
        Background regions with fewer than n_min points
        will not be included in the fit.
    n_max : int or None
        Upper limit on points per background region, passed through to
        ``get_background``.
    cstep : float or None
        The interval between calculated background points.
    bkg_filter : bool
        If true, apply a rolling filter to the isolated background regions
        to exclude regions with anomalously high values. If True, two parameters
        alter the filter's behaviour:
    f_win : int
        The size of the rolling window
    f_n_lim : float
        The number of standard deviations above the rolling mean
        to set the threshold.
    focus_stage : str
        Which stage of analysis to apply processing to.
        Defaults to 'despiked' if present, or 'rawdata' if not.
        Can be one of:
        * 'rawdata': raw data, loaded from csv file.
        * 'despiked': despiked data.
        * 'signal'/'background': isolated signal and background data.
          Created by self.separate, after signal and background
          regions have been identified by self.autorange.
        * 'bkgsub': background subtracted data, created by
          self.bkg_correct
        * 'ratios': element ratio data, created by self.ratio.
        * 'calibrated': ratio data calibrated to standards, created by self.calibrate.
    """
    # Normalise `analytes`: None means "all analytes" and resets self.bkg.
    if analytes is None:
        analytes = self.analytes
        self.bkg = Bunch()
    elif isinstance(analytes, str):
        analytes = [analytes]
    # Isolate and summarise the background regions (fills self.bkg).
    self.get_background(n_min=n_min, n_max=n_max,
                        bkg_filter=bkg_filter,
                        f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
    def pad(a, lo=None, hi=None):
        # Extend `a` with explicit endpoints (or copies of its first/last
        # values) so interpolation covers the full measurement window.
        if lo is None:
            lo = [a[0]]
        if hi is None:
            hi = [a[-1]]
        return np.concatenate((lo, a, hi))
    if 'calc' not in self.bkg.keys():
        # create time points to calculate background
        # if cstep is None:
        #     cstep = self.bkg['raw']['uTime'].ptp() / 100
        # bkg_t = np.arange(self.bkg['summary']['uTime']['mean'].min(),
        #                   self.bkg['summary']['uTime']['mean'].max(),
        #                   cstep)
        bkg_t = pad(self.bkg['summary'].loc[:, ('uTime', 'mean')], [0], [self.max_time])
        self.bkg['calc'] = Bunch()
        self.bkg['calc']['uTime'] = bkg_t
    d = self.bkg['summary']
    # Store padded mean/std/stderr series for each analyte, with a
    # progress bar over the analyte list.
    with self.pbar.set(total=len(analytes), desc='Calculating Analyte Backgrounds') as prog:
        for a in analytes:
            self.bkg['calc'][a] = {'mean': pad(d.loc[:, (a, 'mean')].values),
                                   'std': pad(d.loc[:, (a, 'std')].values),
                                   'stderr': pad(d.loc[:, (a, 'stderr')].values)}
            prog.update()
    # NOTE(review): the bare expression below is a no-op — looks like
    # leftover debugging; confirm and remove.
    self.bkg['calc']
    return
|
Background calculation using a 1D interpolation.
scipy.interpolate.interp1D is used for interpolation.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
kind : str or int
Integer specifying the order of the spline interpolation
used, or string specifying a type of interpolation.
Passed to `scipy.interpolate.interp1D`
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
|
def configure_delete(self, ns, definition):
    """
    Register a delete endpoint.

    The definition's func should be a delete function, which must:
    - accept kwargs for path data
    - return truthy/falsey

    :param ns: the namespace
    :param definition: the endpoint definition
    """
    # Query-string schema defaults to an empty Schema when none is given.
    request_schema = definition.request_schema or Schema()
    @self.add_route(ns.instance_path, Operation.Delete, ns)
    @qs(request_schema)
    @wraps(definition.func)
    def delete(**path_data):
        headers = dict()
        request_data = load_query_string_data(request_schema)
        # A falsey result from the delete function is treated as an error
        # by require_response_data.
        response_data = require_response_data(definition.func(**merge_data(path_data, request_data)))
        # Let the definition add custom headers based on the result.
        definition.header_func(headers, response_data)
        response_format = self.negotiate_response_content(definition.response_formats)
        # Deletes return an empty body with the operation's default code.
        return dump_response_data(
            "",
            None,
            status_code=Operation.Delete.value.default_code,
            headers=headers,
            response_format=response_format,
        )
    delete.__doc__ = "Delete a {} by id".format(ns.subject_name)
|
Register a delete endpoint.
The definition's func should be a delete function, which must:
- accept kwargs for path data
- return truthy/falsey
:param ns: the namespace
:param definition: the endpoint definition
|
def visitObjectDef(self, ctx: jsgParser.ObjectDefContext):
    """ objectDef: ID objectExpr

    Register a JSGObjectExpr for this object definition under its ID in
    the grammar elements table.
    """
    name = as_token(ctx)
    self._context.grammarelts[name] = JSGObjectExpr(self._context, ctx.objectExpr(), name)
|
objectDef: ID objectExpr
|
def select_sample(in_file, sample, out_file, config, filters=None):
    """Select a single sample from the supplied multisample VCF file.

    :param in_file: multisample VCF (optionally bgzipped).
    :param sample: name of the sample to extract.
    :param out_file: output VCF path; a ``.gz`` suffix triggers
        bgzip/index of the result.
    :param config: configuration used to locate the bcftools program.
    :param filters: optional bcftools ``-f`` filter string, e.g. 'PASS,.'.
    :return: path to the output file.
    """
    # Skip work entirely if the output already exists.
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            # With a single-sample input a plain copy suffices.
            if len(get_samples(in_file)) == 1:
                shutil.copy(in_file, tx_out_file)
            else:
                # bcftools requires a bgzipped, indexed input.
                if in_file.endswith(".gz"):
                    bgzip_and_index(in_file, config)
                bcftools = config_utils.get_program("bcftools", config)
                output_type = "z" if out_file.endswith(".gz") else "v"
                filter_str = "-f %s" % filters if filters is not None else "" # filters could be e.g. 'PASS,.'
                cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}"
                do.run(cmd.format(**locals()), "Select sample: %s" % sample)
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
|
Select a single sample from the supplied multisample VCF file.
|
def find_external_urls(self, entry):
    """
    Find external URLs in an entry.

    Parses the entry's HTML content and returns the href of every
    anchor pointing outside the site.
    """
    soup = BeautifulSoup(entry.html_content, 'html.parser')
    site_url = self.ressources.site_url
    return [anchor['href']
            for anchor in soup.find_all('a')
            if self.is_external_url(anchor['href'], site_url)]
|
Find external URLs in an entry.
|
def _getCharWidths(self, xref, bfname, ext, ordering, limit, idx=0):
    """Return list of glyphs and glyph widths of a font.

    Thin wrapper around the SWIG-generated
    ``_fitz.Document__getCharWidths``.

    :raises ValueError: if the document is closed or encrypted.
    """
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    return _fitz.Document__getCharWidths(self, xref, bfname, ext, ordering, limit, idx)
|
Return list of glyphs and glyph widths of a font.
|
def parsedeglat (latstr):
    """Parse a latitude formatted as sexagesimal degrees into an angle.

    Converts a textual latitude in degrees into a floating point value in
    radians. The format of *latstr* is very limited: no leading or
    trailing whitespace, components separated by colons — e.g.
    ``"-00:12:34.5"``. Raises :exc:`ValueError` for anything else, and
    for latitudes outside the [-90, 90] degree range.
    """
    degrees = _parsesexagesimal (latstr, 'latitude', True)
    # A latitude beyond +/-90 degrees is physically meaningless.
    if abs (degrees) > 90:
        raise ValueError ('illegal latitude specification: ' + latstr)
    # Convert degrees to radians.
    return degrees * D2R
|
Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
|
def apply_config(self, config):
    """
    Constructs HAProxyConfig and HAProxyControl instances based on the
    contents of the config.

    This is mostly a matter of constructing the configuration stanzas.

    :param config: mapping with required "config_file", "socket_file" and
        "pid_file" keys, plus optional "global", "defaults", "proxies",
        "stats", "meta_clusters" and "bind_address" entries.
    """
    self.haproxy_config_path = config["config_file"]
    # Global stanza: user-provided lines plus the admin socket needed
    # for runtime control.
    global_stanza = Stanza("global")
    global_stanza.add_lines(config.get("global", []))
    global_stanza.add_lines([
        "stats socket %s mode 600 level admin" % config["socket_file"],
        "stats timeout 2m"
    ])
    defaults_stanza = Stanza("defaults")
    defaults_stanza.add_lines(config.get("defaults", []))
    # One proxy stanza per configured proxy entry.
    proxy_stanzas = [
        ProxyStanza(
            name, proxy["port"], proxy["upstreams"],
            proxy.get("options", []),
            proxy.get("bind_address")
        )
        for name, proxy in six.iteritems(config.get("proxies", {}))
    ]
    # Optional stats listener, with per-direction timeout overrides.
    stats_stanza = None
    if "stats" in config:
        stats_stanza = StatsStanza(
            config["stats"]["port"], config["stats"].get("uri", "/")
        )
        for timeout in ("client", "connect", "server"):
            if timeout in config["stats"].get("timeouts", {}):
                stats_stanza.add_line(
                    "timeout %s %d" % (
                        timeout,
                        config["stats"]["timeouts"][timeout]
                    )
                )
    self.config_file = HAProxyConfig(
        global_stanza, defaults_stanza,
        proxy_stanzas=proxy_stanzas, stats_stanza=stats_stanza,
        meta_clusters=config.get("meta_clusters", {}),
        bind_address=config.get("bind_address")
    )
    self.control = HAProxyControl(
        config["config_file"], config["socket_file"], config["pid_file"],
    )
|
Constructs HAProxyConfig and HAProxyControl instances based on the
contents of the config.
This is mostly a matter of constructing the configuration stanzas.
|
def rename_feature(self, old_feature, new_feature):
    """
    Change the label of a feature attached to the Bundle

    :parameter str old_feature: the current name of the feature
        (must exist)
    :parameter str new_feature: the desired new name of the feature
        (must not exist)
    :return: None
    :raises ValueError: if the new_feature is forbidden
    """
    # TODO: raise error if old_feature not found?
    # Validate the new label before touching anything.
    self._check_label(new_feature)
    self._rename_label('feature', old_feature, new_feature)
|
Change the label of a feature attached to the Bundle
:parameter str old_feature: the current name of the feature
(must exist)
:parameter str new_feature: the desired new name of the feature
(must not exist)
:return: None
:raises ValueError: if the new_feature is forbidden
|
def _compute_scale(self, instruction_id, svg_dict):
"""Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed
"""
bbox = list(map(float, svg_dict["svg"]["@viewBox"].split()))
scale = self._zoom / (bbox[3] - bbox[1])
self._symbol_id_to_scale[instruction_id] = scale
|
Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed
|
def write(self, writer):
    """
    Writes an XML representation of this node (including descendants) to the specified file-like object.

    :param writer: An :class:`XmlWriter` instance to write this node to
    """
    # Nodes with children are written across multiple lines; the opening
    # tag only starts a new line when there is no inline data.
    multiline = bool(self._children)
    newline_start = multiline and not bool(self.data)
    writer.start(self.tagname, self.attrs, newline=newline_start)
    if self.data:
        writer.data(self.data, newline=bool(self._children))
    # Recurse into children in document order.
    for c in self._children:
        c.write(writer)
    writer.end(self.tagname, indent=multiline)
|
Writes an XML representation of this node (including descendants) to the specified file-like object.
:param writer: An :class:`XmlWriter` instance to write this node to
|
def get_install_names(filename):
    """ Return install names from library named in `filename`

    Returns tuple of install names

    tuple will be empty if no install names, or if this is not an object file.

    Parameters
    ----------
    filename : str
        filename of library

    Returns
    -------
    install_names : tuple
        tuple of install names for library `filename`
    """
    # Parse `otool -L` output; the first line names the file itself.
    lines = _cmd_out_err(['otool', '-L', filename])
    if not _line0_says_object(lines[0], filename):
        return ()
    names = tuple(parse_install_name(line)[0] for line in lines[1:])
    install_id = get_install_id(filename)
    # PEP 8 identity test (was `not install_id is None`).
    if install_id is not None:
        # When the library has an install id, otool lists it first;
        # drop it so only the dependency names remain.
        assert names[0] == install_id
        return names[1:]
    return names
|
Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename`
|
def add_access_list(self, loadbalancer, access_list):
    """
    Adds the access list provided to the load balancer.

    The 'access_list' should be a list of dicts in the following format:

        [{"address": "192.0.43.10", "type": "DENY"},
         {"address": "192.0.43.11", "type": "ALLOW"},
         ...
         {"address": "192.0.43.99", "type": "DENY"},
        ]

    If no access list exists, it is created. If an access list
    already exists, it is updated with the provided list.

    :param loadbalancer: load balancer instance or ID.
    :param access_list: list of access rule dicts as described above.
    :return: the API response body.
    """
    req_body = {"accessList": access_list}
    # Accept either a loadbalancer object or a bare ID.
    uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer)
    resp, body = self.api.method_post(uri, body=req_body)
    return body
|
Adds the access list provided to the load balancer.
The 'access_list' should be a list of dicts in the following format:
[{"address": "192.0.43.10", "type": "DENY"},
{"address": "192.0.43.11", "type": "ALLOW"},
...
{"address": "192.0.43.99", "type": "DENY"},
]
If no access list exists, it is created. If an access list
already exists, it is updated with the provided list.
|
def make(parser):
    """DEPRECATED
    prepare OpenStack basic environment"""
    subparsers = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )

    def gen_pass_f(args):
        # Delegate to the module-level password generator.
        gen_pass()

    def cmd_f(args):
        # Fan the command out to every host in the comma-separated list.
        cmd(args.user, args.hosts.split(','), args.key_filename,
            args.password, args.run)

    gen_pass_parser = subparsers.add_parser(
        'gen-pass', help='generate the password')
    gen_pass_parser.set_defaults(func=gen_pass_f)

    cmd_parser = subparsers.add_parser(
        'cmd', help='run command line on the target host')
    cmd_parser.add_argument(
        '--run', help='the command running on the remote node',
        action='store', default=None, dest='run')
    cmd_parser.set_defaults(func=cmd_f)
|
DEPRECATED
prepare OpenStack basic environment
|
def ReadSerializedDict(cls, json_dict):
    """Reads an attribute container from serialized dictionary form.

    Args:
      json_dict (dict[str, object]): JSON serialized objects.

    Returns:
      AttributeContainer: attribute container or None if `json_dict` is
          empty or None.

    Raises:
      TypeError: if the serialized dictionary does not contain an
          AttributeContainer.
    """
    if not json_dict:
        return None
    json_object = cls._ConvertDictToObject(json_dict)
    if not isinstance(json_object, containers_interface.AttributeContainer):
        # Bug fix: '{0:s}' applied to a type object raises
        # "unsupported format string"; use '!s' to stringify it so the
        # intended error message is actually produced.
        raise TypeError('{0!s} is not an attribute container type.'.format(
            type(json_object)))
    return json_object
|
Reads an attribute container from serialized dictionary form.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
AttributeContainer: attribute container or None.
Raises:
TypeError: if the serialized dictionary does not contain an
AttributeContainer.
|
def get_context_data(self, **kwargs):
    """Return the template context, exposing the current poster."""
    context_data = super().get_context_data(**kwargs)
    context_data['poster'] = self.poster
    return context_data
|
Returns the context data to provide to the template.
|
def _needed_markup_bot(self):
    """
    Returns the input peer of the bot that's needed for the reply markup.

    This is necessary for :tl:`KeyboardButtonSwitchInline` since we need
    to know what bot we want to start. Raises ``ValueError`` if the bot
    cannot be found but is needed. Returns ``None`` if it's not needed.
    """
    if not isinstance(self.reply_markup, (
            types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
        return None

    for row in self.reply_markup.rows:
        for button in row.buttons:
            if isinstance(button, types.KeyboardButtonSwitchInline):
                if button.same_peer:
                    bot = self.input_sender
                    if not bot:
                        raise ValueError('No input sender')
                    # Bug fix: the sender was previously found but never
                    # returned, so the method always fell through and
                    # implicitly returned None.
                    return bot
                else:
                    try:
                        return self._client._entity_cache[self.via_bot_id]
                    except KeyError:
                        raise ValueError('No input sender') from None
|
Returns the input peer of the bot that's needed for the reply markup.
This is necessary for :tl:`KeyboardButtonSwitchInline` since we need
to know what bot we want to start. Raises ``ValueError`` if the bot
cannot be found but is needed. Returns ``None`` if it's not needed.
|
def more_like_this(self, query, fields, columns=None, start=0, rows=30):
    """
    Retrieve "more like this" results for a passed query document.

    query - query for a document on which to base similar documents
    fields - fields on which to base similarity estimation (either comma
        delimited string or a list)
    columns - columns to return (list of strings); defaults to all fields
        plus the relevance score
    start - start number for first result (used in pagination)
    rows - number of rows to return (used for pagination, defaults to 30)

    Returns the parsed response, or None on a failed/errored request.
    """
    # NOTE(review): ``basestring`` makes this helper Python 2 only.
    if isinstance(fields, basestring):
        mlt_fields = fields
    else:
        mlt_fields = ",".join(fields)

    if columns is None:
        columns = ["*", "score"]

    params = {
        'q': query,
        'json.nl': 'map',
        'mlt.fl': mlt_fields,
        'fl': ",".join(columns),
        'start': str(start),
        'rows': str(rows),
        'wt': "json",
    }

    # Distributed setups must fan the query out to every shard.
    if len(self.endpoints) > 1:
        params["shards"] = self._get_shards()

    assert self.default_endpoint in self.endpoints
    request_url = _get_url(self.endpoints[self.default_endpoint], "mlt")
    results = self._send_solr_query(request_url, params)
    if not results:
        return None

    assert "responseHeader" in results
    # A non-zero status means Solr reported a server-side failure.
    if not results.get("responseHeader").get("status") == 0:
        logger.error("Server error while retrieving results: %s", results)
        return None

    assert "response" in results
    return self._parse_response(results)
|
Retrieves "more like this" results for a passed query document
query - query for a document on which to base similar documents
fields - fields on which to base similarity estimation (either comma delimited string or a list)
columns - columns to return (list of strings)
start - start number for first result (used in pagination)
rows - number of rows to return (used for pagination, defaults to 30)
|
def update_device(name, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Add attributes to an existing device, identified by name.

    name
        The name of the device, e.g., ``edge_router``
    kwargs
        Arguments to change in device, e.g., ``serial=JN2932930``

    CLI Example:

    .. code-block:: bash

        salt myminion netbox.update_device edge_router serial=JN2932920
    '''
    # Drop Salt-internal keys (e.g. __pub_*) before applying attributes.
    cleaned = __utils__['args.clean_kwargs'](**kwargs)
    nb_device = _get('dcim', 'devices', auth_required=True, name=name)
    for attribute, new_value in cleaned.items():
        setattr(nb_device, attribute, new_value)
    try:
        nb_device.save()
    except RequestError as e:
        log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error)
        return False
    return {'dcim': {'devices': cleaned}}
|
.. versionadded:: 2019.2.0
Add attributes to an existing device, identified by name.
name
The name of the device, e.g., ``edge_router``
kwargs
Arguments to change in device, e.g., ``serial=JN2932930``
CLI Example:
.. code-block:: bash
salt myminion netbox.update_device edge_router serial=JN2932920
|
def solubility_eutectic(T, Tm, Hm, Cpl=0, Cps=0, gamma=1):
r'''Returns the maximum solubility of a solute in a solvent.
.. math::
\ln x_i^L \gamma_i^L = \frac{\Delta H_{m,i}}{RT}\left(
1 - \frac{T}{T_{m,i}}\right) - \frac{\Delta C_{p,i}(T_{m,i}-T)}{RT}
+ \frac{\Delta C_{p,i}}{R}\ln\frac{T_m}{T}
\Delta C_{p,i} = C_{p,i}^L - C_{p,i}^S
Parameters
----------
T : float
Temperature of the system [K]
Tm : float
Melting temperature of the solute [K]
Hm : float
Heat of melting at the melting temperature of the solute [J/mol]
Cpl : float, optional
Molar heat capacity of the solute as a liquid [J/mol/K]
Cpls: float, optional
Molar heat capacity of the solute as a solid [J/mol/K]
gamma : float, optional
Activity coefficient of the solute as a liquid [-]
Returns
-------
x : float
Mole fraction of solute at maximum solubility [-]
Notes
-----
gamma is of the solute in liquid phase
Examples
--------
From [1]_, matching example
>>> solubility_eutectic(T=260., Tm=278.68, Hm=9952., Cpl=0, Cps=0, gamma=3.0176)
0.24340068761677464
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
dCp = Cpl-Cps
x = exp(- Hm/R/T*(1-T/Tm) + dCp*(Tm-T)/R/T - dCp/R*log(Tm/T))/gamma
return x
|
r'''Returns the maximum solubility of a solute in a solvent.
.. math::
\ln x_i^L \gamma_i^L = \frac{\Delta H_{m,i}}{RT}\left(
1 - \frac{T}{T_{m,i}}\right) - \frac{\Delta C_{p,i}(T_{m,i}-T)}{RT}
+ \frac{\Delta C_{p,i}}{R}\ln\frac{T_m}{T}
\Delta C_{p,i} = C_{p,i}^L - C_{p,i}^S
Parameters
----------
T : float
Temperature of the system [K]
Tm : float
Melting temperature of the solute [K]
Hm : float
Heat of melting at the melting temperature of the solute [J/mol]
Cpl : float, optional
Molar heat capacity of the solute as a liquid [J/mol/K]
Cps : float, optional
Molar heat capacity of the solute as a solid [J/mol/K]
gamma : float, optional
Activity coefficient of the solute as a liquid [-]
Returns
-------
x : float
Mole fraction of solute at maximum solubility [-]
Notes
-----
gamma is of the solute in liquid phase
Examples
--------
From [1]_, matching example
>>> solubility_eutectic(T=260., Tm=278.68, Hm=9952., Cpl=0, Cps=0, gamma=3.0176)
0.24340068761677464
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
|
def rotate_x(self, deg):
    """Rotate the mesh around the x-axis.

    :param float deg: Rotation angle (degree)
    :return: self, with ``vectors`` replaced by the rotated coordinates
    """
    angle = math.radians(deg)
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    # Homogeneous (4x4) rotation matrix for row-vector multiplication.
    rotation = numpy.array([
        [1, 0, 0, 0],
        [0, cos_a, sin_a, 0],
        [0, -sin_a, cos_a, 0],
        [0, 0, 0, 1],
    ])
    self.vectors = self.vectors.dot(rotation)
    return self
|
Rotate mesh around x-axis
:param float deg: Rotation angle (degree)
:return:
|
def remove_account(self, name):
    """
    Remove an account from the account's sub accounts.

    If several sub accounts share the name, the last one is removed; if
    none matches, this is a no-op.

    :param name: The name of the account to remove.
    """
    # Scan from the end so the last matching account is the one removed,
    # mirroring the original last-match-wins behaviour.
    for candidate in reversed(self.accounts):
        if candidate.name == name:
            self.accounts.remove(candidate)
            break
|
Remove an account from the account's sub accounts.
:param name: The name of the account to remove.
|
def build(self, builder):
    """Build the ClinicalData XML element (and children) via *builder*."""
    attrs = dict(
        MetaDataVersionOID=str(self.metadata_version_oid),
        StudyOID="%s (%s)" % (self.projectname, self.environment,),
    )
    # Let mixins contribute their attributes before opening the element.
    self.mixin_params(attrs)
    builder.start("ClinicalData", attrs)
    # Children: subject data first, then annotations, if present.
    for subject in (self.subject_data or []):
        subject.build(builder)
    if self.annotations is not None:
        self.annotations.build(builder)
    builder.end("ClinicalData")
|
Build XML by appending to builder
|
def createdb():
    """Create database tables from sqlalchemy models"""
    # Echo emitted SQL so the operator can see the DDL being executed.
    manager.db.engine.echo = True
    manager.db.create_all()
    # Stamp the freshly-created schema with the current Alembic revision
    # so future migrations start from a known baseline.
    set_alembic_revision()
|
Create database tables from sqlalchemy models
|
def _parse_header(line):
    """Parse a Content-type like header.

    Return the main content-type and a dictionary of options.
    """
    parts = _parseparam(';' + line)
    # First chunk is the main content type itself.  Bug fix: use the
    # next() builtin instead of the Python-2-only .next() method.
    key = next(parts)
    pdict = {}
    for p in parts:
        i = p.find('=')
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i+1:].strip()
            # Strip surrounding quotes and unescape quoted-pair chars.
            if len(value) >= 2 and value[0] == value[-1] == '"':
                value = value[1:-1]
                value = value.replace('\\\\', '\\').replace('\\"', '"')
            pdict[name] = value
    return key, pdict
|
Parse a Content-type like header.
Return the main content-type and a dictionary of options.
|
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{0}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{0}{1}\n".format(key, value))
fp.write("\n")
|
Write a single section to the specified `fp'.
|
def coupling_efficiency(mode_solver, fibre_mfd,
                        fibre_offset_x=0, fibre_offset_y=0,
                        n_eff_fibre=1.441):
    '''
    Find the power coupling efficiency between each solved mode and a
    fibre of given MFD.

    Args:
        mode_solver (_ModeSolver): Mode solver that
            has found a fundamental mode.
        fibre_mfd (float): The mode-field diameter
            (MFD) of the fibre.
        fibre_offset_x (float): Offset the fibre
            from the centre position of the window
            in x. Default is 0 (no offset).
        fibre_offset_y (float): Offset the fibre
            from the centre position of the window
            in y. Default is 0 (no offset).
        n_eff_fibre (float): The effective index
            of the fibre mode. Default is 1.441.

    Returns:
        list: The power coupling efficiency for each solved mode.
    '''
    fibre_field = _make_gaussian(mode_solver._structure.xc,
                                 mode_solver._structure.yc,
                                 fibre_mfd, fibre_offset_x, fibre_offset_y)
    # Coupling per mode: field overlap with the fibre Gaussian, scaled by
    # the transmission between the two effective indices.
    return [
        abs(_overlap(mode, fibre_field)) * abs(transmission(n_eff, n_eff_fibre))
        for mode, n_eff in zip(mode_solver.modes, mode_solver.n_effs)
    ]
|
Finds the coupling efficiency between a solved
fundamental mode and a fibre of given MFD.
Args:
mode_solver (_ModeSolver): Mode solver that
has found a fundamental mode.
fibre_mfd (float): The mode-field diameter
(MFD) of the fibre.
fibre_offset_x (float): Offset the fibre
from the centre position of the window
in x. Default is 0 (no offset).
fibre_offset_y (float): Offset the fibre
from the centre position of the window
in y. Default is 0 (no offset).
n_eff_fibre (float): The effective index
of the fibre mode. Default is 1.441.
Returns:
float: The power coupling efficiency.
|
def get_default_config(self):
    """
    Return the default configuration options for this collector.
    """
    config = super(NetfilterAccountingCollector, self).get_default_config()
    # Collector-specific defaults layered on top of the base settings.
    defaults = {
        'path': 'nfacct',
        'bin': 'nfacct',
        'use_sudo': False,
        'reset': True,
        'sudo_cmd': '/usr/bin/sudo',
        'method': 'Threaded',
    }
    config.update(defaults)
    return config
|
Returns default configuration options.
|
def fingerprint(dirnames, prefix=None, previous=[]):
    #pylint:disable=dangerous-default-value
    """
    Returns a list of paths available from *dirname*. When previous
    is specified, returns a list of additional files only.

    Example:

    [{ "Key": "abc.txt",
       "LastModified": "Mon, 05 Jan 2015 12:00:00 UTC"},
     { "Key": "def.txt",
       "LastModified": "Mon, 05 Jan 2015 12:00:001 UTC"},
    ]
    """
    results = []
    for dirname in dirnames:
        for filename in os.listdir(dirname):
            fullpath = os.path.join(dirname, filename)
            if os.path.isdir(fullpath):
                # Recurse into the sub-directory; `previous` is passed
                # through so already-known keys are skipped at any depth.
                results += fingerprint(
                    [fullpath], prefix=filename, previous=previous)
            else:
                fullname = fullpath
                # NOTE(review): `fullname` is a joined path
                # ("dir/sub/file") while `prefix` is a bare directory
                # name from the recursive call above, so this startswith
                # test rarely matches — presumably the intent was to make
                # keys relative to the top-level dirname; confirm against
                # callers before relying on the emitted "Key" values.
                if prefix and fullname.startswith(prefix):
                    fullname = fullname[len(prefix):]
                # Skip files already listed in `previous` (matched by Key).
                found = False
                for prevpath in previous:
                    if fullname == prevpath['Key']:
                        found = True
                        break
                if not found:
                    # Report mtime as a UTC, HTTP-date-style string.
                    mtime = datetime.datetime.fromtimestamp(
                        os.path.getmtime(fullpath), tz=utc)
                    results += [{"Key": fullname,
                                 "LastModified": mtime.strftime(
                                     '%a, %d %b %Y %H:%M:%S %Z')}]
    return results
|
Returns a list of paths available from *dirname*. When previous
is specified, returns a list of additional files only.
Example:
[{ "Key": "abc.txt",
"LastModified": "Mon, 05 Jan 2015 12:00:00 UTC"},
{ "Key": "def.txt",
"LastModified": "Mon, 05 Jan 2015 12:00:001 UTC"},
]
|
def using(self, client):
    """
    Associate the search request with an elasticsearch client. A fresh copy
    is returned; the current instance remains unchanged.

    :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
        an alias to look up in ``elasticsearch_dsl.connections``
    """
    clone = self._clone()
    clone._using = client
    return clone
|
Associate the search request with an elasticsearch client. A fresh copy
will be returned with current instance remaining unchanged.
:arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
an alias to look up in ``elasticsearch_dsl.connections``
|
def getWorkDirs():
    """get input/output dirs (same input/output layout as for package)"""
    # Frame 1 of the stack is the module that called us; derive the
    # directory layout from that module's relative path.
    caller_fullurl = inspect.stack()[1][1]
    caller_modurl = os.path.splitext(os.path.relpath(caller_fullurl))[0]
    dirs = caller_modurl.split('/')
    dirs[0] = 'data'  # TODO de-hardcode
    # Output directory is created on demand.
    outDir = os.path.join('output', *dirs[1:])
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    # Input directory must already exist; abort otherwise.
    inDir = os.path.join(*(dirs + ['input']))
    if not os.path.exists(inDir):
        logging.critical('create input dir %s to continue!' % inDir)
        sys.exit(1)
    return inDir, outDir
|
get input/output dirs (same input/output layout as for package)
|
def get_name_history(self, name, offset=None, count=None, reverse=False):
    """
    Get the historic states for a name, grouped by block height.
    """
    cursor = self.db.cursor()
    return namedb_get_history(cursor, name, offset=offset, count=count,
                              reverse=reverse)
|
Get the historic states for a name, grouped by block height.
|
def remove_root_bank(self, bank_id):
    """Removes a root bank from this hierarchy.

    arg: bank_id (osid.id.Id): the ``Id`` of a bank
    raise: NotFound - ``bank_id`` not a parent of ``child_id``
    raise: NullArgument - ``bank_id`` or ``child_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_root_bin_template
    # Prefer the catalog session when one is configured.
    if self._catalog_session is None:
        return self._hierarchy_session.remove_root(id_=bank_id)
    return self._catalog_session.remove_root_catalog(catalog_id=bank_id)
|
Removes a root bank from this hierarchy.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
raise: NotFound - ``bank_id`` not a parent of ``child_id``
raise: NullArgument - ``bank_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
async def minizinc(
    mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
    globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
    keep=False, output_vars=None, output_base=None, output_mode='dict',
    solver=None, timeout=None, two_pass=None, pre_passes=None,
    output_objective=False, non_unique=False, all_solutions=False,
    num_solutions=None, free_search=False, parallel=None, seed=None,
    rebase_arrays=True, keep_solutions=True, return_enums=False,
    max_queue_size=0, **kwargs
):
    """Coroutine version of the ``pymzn.minizinc`` function.

    All parameters other than `max_queue_size` mirror ``pymzn.minizinc``.

    Parameters
    ----------
    max_queue_size : int
        Maximum number of solutions in the queue between the solution parser and
        the returned solution stream. When the queue is full, the solver
        execution will halt until an item of the queue is consumed. This option
        is useful for memory management in cases where the solution stream gets
        very large and the caller cannot consume solutions as fast as they are
        produced. Use with care, if the full solution stream is not consumed
        before the execution of the Python program ends it may result in the
        solver becoming a zombie process. Default is ``0``, meaning an infinite
        queue.
    """
    # Prepare the model/data files (may materialize temporary files that
    # are cleaned up below when ``keep`` is false).
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments
        )
    # Fall back to the configured solver (gecode by default).
    if not solver:
        solver = config.get('solver', gecode)
    # Configured solver_args take precedence over **kwargs.
    solver_args = {**kwargs, **config.get('solver_args', {})}
    proc = await solve(
        solver, mzn_file, *dzn_files, data=data, include=include,
        stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
        pre_passes=pre_passes, output_objective=output_objective,
        non_unique=non_unique, all_solutions=all_solutions,
        num_solutions=num_solutions, free_search=free_search, parallel=parallel,
        seed=seed, allow_multiple_assignments=allow_multiple_assignments,
        **solver_args
    )
    if output_mode == 'raw':
        # Raw mode: stream solver output into a queue without parsing.
        solns = asyncio.Queue(maxsize=max_queue_size)
        task = asyncio.create_task(_collect(proc, solns))
    else:
        parser = AsyncSolutionParser(
            solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
            types=types, keep_solutions=keep_solutions,
            return_enums=return_enums, max_queue_size=max_queue_size
        )
        solns = await parser.parse(proc)
        task = parser.parse_task
    # Remove temporary model/data files once parsing finishes.
    if not keep:
        task.add_done_callback(partial(_cleanup_cb, [mzn_file, data_file]))
    return solns
|
Coroutine version of the ``pymzn.minizinc`` function.
Parameters
----------
max_queue_size : int
Maximum number of solutions in the queue between the solution parser and
the returned solution stream. When the queue is full, the solver
execution will halt until an item of the queue is consumed. This option
is useful for memory management in cases where the solution stream gets
very large and the caller cannot consume solutions as fast as they are
produced. Use with care, if the full solution stream is not consumed
before the execution of the Python program ends it may result in the
solver becoming a zombie process. Default is ``0``, meaning an infinite
queue.
|
def single_run_arrays(spanning_cluster=True, **kwargs):
    r'''
    Generate statistics for a single run

    This is a stand-alone helper function to evolve a single sample state
    (realization) and return the cluster statistics.

    Parameters
    ----------
    spanning_cluster : bool, optional
        Whether to detect a spanning cluster or not.
        Defaults to ``True``.

    kwargs : keyword arguments
        Piped through to :func:`sample_states`

    Returns
    -------
    ret : dict
        Cluster statistics

    ret['N'] : int
        Total number of sites

    ret['M'] : int
        Total number of bonds

    ret['max_cluster_size'] : 1-D :py:class:`numpy.ndarray` of int, size ``ret['M'] + 1``
        Array of the sizes of the largest cluster (absolute number of sites) at
        the respective occupation number.

    ret['has_spanning_cluster'] : 1-D :py:class:`numpy.ndarray` of bool, size ``ret['M'] + 1``
        Array of booleans for each occupation number.
        The respective entry is ``True`` if there is a spanning cluster,
        ``False`` otherwise.
        Only exists if `spanning_cluster` argument is set to ``True``.

    ret['moments'] : 2-D :py:class:`numpy.ndarray` of int
        Array of shape ``(5, ret['M'] + 1)``.
        The ``(k, m)``-th entry is the ``k``-th raw moment of the (absolute)
        cluster size distribution, with ``k`` ranging from ``0`` to ``4``, at
        occupation number ``m``.

    See Also
    --------
    sample_states
    '''
    # initial iteration
    # we do not need a copy of the result dictionary since we copy the values
    # anyway
    kwargs['copy_result'] = False
    ret = dict()

    for n, state in enumerate(sample_states(
        spanning_cluster=spanning_cluster, **kwargs
    )):

        # merge cluster statistics
        if 'N' in ret:
            assert ret['N'] == state['N']
        else:
            ret['N'] = state['N']

        if 'M' in ret:
            assert ret['M'] == state['M']
        else:
            ret['M'] = state['M']
            number_of_states = state['M'] + 1
            # Bug fix: allocate the output arrays once, on the first
            # iteration, when M becomes known.  Previously the arrays were
            # re-allocated (np.empty) on *every* iteration, discarding the
            # values written for all earlier occupation numbers.
            max_cluster_size = np.empty(number_of_states)
            if spanning_cluster:
                # Bug fix: np.bool was removed from NumPy; use builtin bool.
                has_spanning_cluster = np.empty(number_of_states, dtype=bool)
            moments = np.empty((5, number_of_states))

        max_cluster_size[n] = state['max_cluster_size']
        for k in range(5):
            moments[k, n] = state['moments'][k]
        if spanning_cluster:
            has_spanning_cluster[n] = state['has_spanning_cluster']

    ret['max_cluster_size'] = max_cluster_size
    ret['moments'] = moments
    if spanning_cluster:
        ret['has_spanning_cluster'] = has_spanning_cluster

    return ret
|
r'''
Generate statistics for a single run
This is a stand-alone helper function to evolve a single sample state
(realization) and return the cluster statistics.
Parameters
----------
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
kwargs : keyword arguments
Piped through to :func:`sample_states`
Returns
-------
ret : dict
Cluster statistics
ret['N'] : int
Total number of sites
ret['M'] : int
Total number of bonds
ret['max_cluster_size'] : 1-D :py:class:`numpy.ndarray` of int, size ``ret['M'] + 1``
Array of the sizes of the largest cluster (absolute number of sites) at
the respective occupation number.
ret['has_spanning_cluster'] : 1-D :py:class:`numpy.ndarray` of bool, size ``ret['M'] + 1``
Array of booleans for each occupation number.
The respective entry is ``True`` if there is a spanning cluster,
``False`` otherwise.
Only exists if `spanning_cluster` argument is set to ``True``.
ret['moments'] : 2-D :py:class:`numpy.ndarray` of int
Array of shape ``(5, ret['M'] + 1)``.
The ``(k, m)``-th entry is the ``k``-th raw moment of the (absolute)
cluster size distribution, with ``k`` ranging from ``0`` to ``4``, at
occupation number ``m``.
See Also
--------
sample_states
|
def get_transcript(self, gene_pk, refseq_id):
    "Get a transcript from the cache or add a new record."
    # A falsy refseq id means there is nothing to look up.
    if not refseq_id:
        return
    cached_pk = self.transcripts.get(refseq_id)
    if cached_pk:
        return cached_pk
    gene = Gene(pk=gene_pk)
    transcript = Transcript(refseq_id=refseq_id, gene=gene)
    try:
        transcript.save()
    except IntegrityError:
        # Row already exists (e.g. created concurrently); load it instead.
        transcript = Transcript.objects.get(refseq_id=refseq_id, gene=gene)
    self.transcripts[refseq_id] = transcript.pk
    return transcript.pk
|
Get a transcript from the cache or add a new record.
|
def get_partstudio_tessellatededges(self, did, wid, eid):
    '''
    Gets the tessellation of the edges of all parts in a part studio.

    Args:
        - did (str): Document ID
        - wid (str): Workspace ID
        - eid (str): Element ID

    Returns:
        - requests.Response: Onshape response data
    '''
    endpoint = '/api/partstudios/d/{}/w/{}/e/{}/tessellatededges'.format(
        did, wid, eid)
    return self._api.request('get', endpoint)
|
Gets the tessellation of the edges of all parts in a part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
|
def defer(self, func, *args, **kwargs):
    """
    Arrange for `func()` to execute on the broker thread. This function
    returns immediately without waiting the result of `func()`. Use
    :meth:`defer_sync` to block until a result is available.

    :raises mitogen.core.Error:
        :meth:`defer` was called after :class:`Broker` has begun shutdown.
    """
    # Already on the broker thread: run the function inline instead of
    # queueing it behind ourselves.
    if thread.get_ident() == self.broker_ident:
        _vv and IOLOG.debug('%r.defer() [immediate]', self)
        return func(*args, **kwargs)
    if self._broker._exitted:
        raise Error(self.broker_shutdown_msg)
    _vv and IOLOG.debug('%r.defer() [fd=%r]', self, self.transmit_side.fd)
    self._lock.acquire()
    try:
        # Only wake the broker on the empty -> non-empty transition; a
        # non-empty queue means a wakeup is already pending.
        if not self._deferred:
            self._wake()
        self._deferred.append((func, args, kwargs))
    finally:
        self._lock.release()
|
Arrange for `func()` to execute on the broker thread. This function
returns immediately without waiting the result of `func()`. Use
:meth:`defer_sync` to block until a result is available.
:raises mitogen.core.Error:
:meth:`defer` was called after :class:`Broker` has begun shutdown.
|
def from_data(room, conn, data):
    """Construct a ChatMessage instance from raw protocol data"""
    files = list()
    rooms = dict()
    text_parts = []
    for part in data["message"]:
        ptype = part["type"]
        if ptype == "text":
            text_parts.append(part["value"])
        elif ptype == "break":
            text_parts.append("\n")
        elif ptype == "file":
            fileid = part["id"]
            # Only files the room currently knows about are attached.
            fileobj = room.filedict.get(fileid)
            if fileobj:
                files.append(fileobj)
            text_parts.append(f"@{fileid}")
        elif ptype == "room":
            roomid = part["id"]
            rooms[roomid] = part["name"]
            text_parts.append(f"#{roomid}")
        elif ptype == "url":
            text_parts.append(part["text"])
        elif ptype == "raw":
            text_parts.append(html_to_text(part["value"]))
        else:
            import warnings

            warnings.warn(f"unknown message type '{ptype}'", Warning)

    nick = data.get("nick") or data.get("user")
    options = data.get("options", dict())
    payload = data.get("data", dict())
    return ChatMessage(
        room,
        conn,
        nick,
        "".join(text_parts),
        roles=Roles.from_options(options),
        options=options,
        data=payload,
        files=files,
        rooms=rooms,
    )
|
Construct a ChatMessage instance from raw protocol data
|
def create_record_sets(self, record_set_dicts):
    """Accept a list of record_set dicts; return record_set objects.

    Each dict's 'Enabled' key (default True) is popped off, and entries
    that are disabled are skipped entirely.
    """
    return [
        self.create_record_set(record_set_dict)
        for record_set_dict in record_set_dicts
        # pop removes the 'Enabled' key and tests if True.
        if record_set_dict.pop('Enabled', True)
    ]
|
Accept list of record_set dicts.
Return list of record_set objects.
|
def create_route(self, item, routes):
    """Register *item* under every route key and return it unchanged."""
    for route_key in routes:
        # Each route key maps to the set of items registered for it.
        handlers = self._routes.setdefault(route_key, set())
        handlers.add(item)
    return item
|
Stores a new item in routing map
|
def parse_conditional_derived_variable(self, node):
    """
    Parses <ConditionalDerivedVariable>

    @param node: Node containing the <ConditionalDerivedVariable> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when no name or value is specified for the
    conditional derived variable.
    """
    attribs = node.lattrib
    # The variable name falls back to the exposure name when absent.
    if 'name' in attribs:
        name = attribs['name']
    elif 'exposure' in attribs:
        name = attribs['exposure']
    else:
        self.raise_error('<ConditionalDerivedVariable> must specify a name')

    exposure = attribs['exposure'] if 'exposure' in attribs else None
    dimension = attribs['dimension'] if 'dimension' in attribs else None

    conditional_derived_variable = ConditionalDerivedVariable(
        name, dimension, exposure)
    self.current_regime.add_conditional_derived_variable(
        conditional_derived_variable)
    # Track the current variable so nested <Case> tags attach to it.
    self.current_conditional_derived_variable = conditional_derived_variable
    self.process_nested_tags(node)
|
Parses <ConditionalDerivedVariable>
@param node: Node containing the <ConditionalDerivedVariable> element
@type node: xml.etree.Element
@raise ParseError: Raised when no name or value is specified for the conditional derived variable.
|
def _groupby_consecutive(txn, max_delta=pd.Timedelta('8h')):
"""Merge transactions of the same direction separated by less than
max_delta time duration.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed round_trips. One row per trade.
- See full explanation in tears.create_full_tear_sheet
max_delta : pandas.Timedelta (optional)
Merge transactions in the same direction separated by less
than max_delta time duration.
Returns
-------
transactions : pd.DataFrame
"""
def vwap(transaction):
if transaction.amount.sum() == 0:
warnings.warn('Zero transacted shares, setting vwap to nan.')
return np.nan
return (transaction.amount * transaction.price).sum() / \
transaction.amount.sum()
out = []
for sym, t in txn.groupby('symbol'):
t = t.sort_index()
t.index.name = 'dt'
t = t.reset_index()
t['order_sign'] = t.amount > 0
t['block_dir'] = (t.order_sign.shift(
1) != t.order_sign).astype(int).cumsum()
t['block_time'] = ((t.dt.sub(t.dt.shift(1))) >
max_delta).astype(int).cumsum()
grouped_price = (t.groupby(('block_dir',
'block_time'))
.apply(vwap))
grouped_price.name = 'price'
grouped_rest = t.groupby(('block_dir', 'block_time')).agg({
'amount': 'sum',
'symbol': 'first',
'dt': 'first'})
grouped = grouped_rest.join(grouped_price)
out.append(grouped)
out = pd.concat(out)
out = out.set_index('dt')
return out
|
Merge transactions of the same direction separated by less than
max_delta time duration.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed round_trips. One row per trade.
- See full explanation in tears.create_full_tear_sheet
max_delta : pandas.Timedelta (optional)
Merge transactions in the same direction separated by less
than max_delta time duration.
Returns
-------
transactions : pd.DataFrame
|
async def plonks(self, ctx):
    """Shows members banned from the bot."""
    guild = ctx.message.server
    banned_ids = self.config.get('plonks', {}).get(guild.id, [])
    # Resolve IDs to member objects, dropping anyone who has since left.
    resolved = [guild.get_member(member_id) for member_id in banned_ids]
    members = '\n'.join(str(member) for member in resolved if member)
    if members:
        await self.bot.responses.basic(title="Plonked Users:", message=members)
    else:
        await self.bot.responses.failure(message='No members are banned in this server.')
|
Shows members banned from the bot.
|
def get_schema(self, dataset_id, table_id):
    """
    Get the schema for a given dataset.table.
    see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource

    :param dataset_id: the dataset ID of the requested table
    :param table_id: the table ID of the requested table
    :return: a table schema
    """
    table_get = self.service.tables().get(
        projectId=self.project_id,
        datasetId=dataset_id,
        tableId=table_id,
    )
    tables_resource = table_get.execute(num_retries=self.num_retries)
    return tables_resource['schema']
|
Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
|
def remove(self, interval):
    """
    Returns self after removing the interval and balancing.

    If interval is not present, raise ValueError.
    """
    # since this is a list, called methods can set this to [1],
    # making it true
    # (i.e. `done` is a mutable "finished" flag threaded through the
    # recursive helper calls)
    done = []
    return self.remove_interval_helper(interval, done, should_raise_error=True)
|
Returns self after removing the interval and balancing.
If interval is not present, raise ValueError.
|
def getFixedStars(self):
    """ Returns a list with all fixed stars. """
    # Look up every known fixed star at this chart's date.
    return ephem.getFixedStarList(const.LIST_FIXED_STARS, self.date)
|
Returns a list with all fixed stars.
|
def _set_target_root_count_in_runtracker(self):
"""Sets the target root count in the run tracker's daemon stats object."""
# N.B. `self._target_roots` is always an expanded list of `Target` objects as
# provided by `GoalRunner`.
target_count = len(self._target_roots)
self.run_tracker.pantsd_stats.set_target_root_size(target_count)
return target_count
|
Sets the target root count in the run tracker's daemon stats object.
|
def addSuccess(self, test: unittest.case.TestCase) -> None:
    """
    Transforms the test in a serializable version of it and sends it to a
    queue for further analysis.

    :param test: the test to save
    """
    # TestState.success tags the result before serialization/enqueueing.
    # noinspection PyTypeChecker
    self.add_result(TestState.success, test)
|
Transforms the test in a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
|
def register_work(self, work, deps=None, manager=None, workdir=None):
    """
    Register a new :class:`Work` and add it to the internal list, taking into
    account possible dependencies.

    Args:
        work: :class:`Work` object.
        deps: List of :class:`Dependency` objects specifying the dependency of
            this node. An empty list of deps implies that this node has no
            dependencies.
        manager: The :class:`TaskManager` responsible for the submission of the
            task. If manager is None, we use the `TaskManager` specified during
            the creation of the work.
        workdir: The name of the directory used for the :class:`Work`.

    Returns:
        The registered :class:`Work`.
    """
    flow_dir = getattr(self, "workdir", None)
    if flow_dir is not None:
        # The flow has a directory; derive the work's directory from it.
        if workdir is None:
            subdir = "w" + str(len(self))
        else:
            subdir = os.path.basename(workdir)
        work.set_workdir(os.path.join(flow_dir, subdir))

    if manager is not None:
        work.set_manager(manager)

    self.works.append(work)

    if deps:
        work.add_deps([Dependency(node, exts) for node, exts in deps.items()])

    return work
|
Register a new :class:`Work` and add it to the internal list, taking into account possible dependencies.
Args:
work: :class:`Work` object.
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the work.
workdir: The name of the directory used for the :class:`Work`.
Returns:
The registered :class:`Work`.
|
def restore_app_connection(self, port=None):
    """Restores the app after device got reconnected.

    Instead of creating a new instance of the client:
      - Uses the given port (or finds a new available host_port if none is
        given).
      - Tries to connect to the remote server with the selected port.

    Args:
        port: If given, this is the host port from which to connect to remote
            device port. If not provided, find a new available port as host
            port.

    Raises:
        AppRestoreConnectionError: When the app was not able to be started.
    """
    self.host_port = port or utils.get_available_host_port()
    self._adb.forward(
        ['tcp:%d' % self.host_port,
         'tcp:%d' % self.device_port])
    try:
        self.connect()
    except Exception:
        # Fix: the original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit; only genuine connection failures should be
        # converted. Log the original error and raise the domain error.
        self.log.exception('Failed to re-connect to app.')
        raise jsonrpc_client_base.AppRestoreConnectionError(
            self._ad,
            ('Failed to restore app connection for %s at host port %s, '
             'device port %s') % (self.package, self.host_port,
                                  self.device_port))
    # Because the previous connection was lost, update self._proc
    self._proc = None
    self._restore_event_client()
|
Restores the app after device got reconnected.
Instead of creating new instance of the client:
- Uses the given port (or find a new available host_port if none is
given).
- Tries to connect to remote server with selected port.
Args:
port: If given, this is the host port from which to connect to remote
device port. If not provided, find a new available port as host
port.
Raises:
AppRestoreConnectionError: When the app was not able to be started.
|
def export_account_state(self, account_state):
    """
    Make an account state presentable to external consumers.

    Numeric credit/debit values are rendered as strings; all other fields
    are passed through unchanged.
    """
    exported = {
        field: account_state[field]
        for field in ('address', 'type', 'lock_transfer_block_id',
                      'block_id', 'vtxindex', 'txid')
    }
    exported['credit_value'] = '{}'.format(account_state['credit_value'])
    exported['debit_value'] = '{}'.format(account_state['debit_value'])
    return exported
|
Make an account state presentable to external consumers
|
def grantSystemPermission(self, login, user, perm):
    """
    Grant a system-level permission to a user (blocking Thrift-style RPC).

    Parameters:
     - login: credentials of the caller performing the grant
     - user: the user receiving the permission
     - perm: the permission to grant
    """
    # Fire the request, then block until the reply arrives (raises on
    # server-side errors).
    self.send_grantSystemPermission(login, user, perm)
    self.recv_grantSystemPermission()
|
Parameters:
- login
- user
- perm
|
def survey_loader(sur_dir=SUR_DIR, sur_file=SUR_FILE):
    """Load and return the :class:`Survey` found at ``sur_dir/sur_file``."""
    path = os.path.join(sur_dir, sur_file)
    with open(path) as handle:
        return Survey(handle.read())
|
Loads up the given survey in the given dir.
|
def list_settings(self):
    """
    Return ``(setting, default_value)`` pairs for every setting this widget
    understands.

    The returned list is used in setup() and get_setup() to initialize the
    widget's internal settings.
    """
    names = (
        self.SETTING_FLAG_PLAIN,
        self.SETTING_FLAG_ASCII,
        self.SETTING_WIDTH,
        self.SETTING_ALIGN,
        self.SETTING_TEXT_FORMATING,
        self.SETTING_DATA_FORMATING,
        self.SETTING_DATA_TYPE,
        self.SETTING_PADDING,
        self.SETTING_PADDING_CHAR,
        self.SETTING_PADDING_LEFT,
        self.SETTING_PADDING_RIGHT,
        self.SETTING_MARGIN,
        self.SETTING_MARGIN_CHAR,
        self.SETTING_MARGIN_LEFT,
        self.SETTING_MARGIN_RIGHT,
    )
    # Fresh containers ({} below) are built on every call, so callers may
    # safely mutate the returned defaults.
    defaults = (False, False, 0, '<', {}, '{:s}', None, None, ' ',
                None, None, None, ' ', None, None)
    return list(zip(names, defaults))
|
Get list of all appropriate settings and their default values.
The returned list is then used in setup() and get_setup() methods to setup
the widget internal settings.
|
def stdout(self):
    """
    Return True when stdout output is requested.

    The flag can come either from the ``--stdout`` command-line option or
    from the ``CROSSPM_STDOUT`` environment variable; any value of the
    variable (even an empty string) is interpreted as True.

    :return: bool
    """
    if self._args['--stdout']:
        return True
    return os.getenv('CROSSPM_STDOUT', None) is not None
|
Флаг --stdout может быть взят из переменной окружения CROSSPM_STDOUT.
Если есть любое значение в CROSSPM_STDOUT - оно понимается как True
:return:
|
def _actionsFreqsAngles(self,*args,**kwargs):
    """
    NAME:
       actionsFreqsAngles (_actionsFreqsAngles)
    PURPOSE:
       evaluate the actions, frequencies, and angles (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
    INPUT:
       Either:
          a) R,vR,vT,z,vz[,phi]:
             1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
             2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
          b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
       delta= (object-wide default) can be used to override the object-wide focal length; can also be an array with length N to allow different delta for different phase-space points
       u0= (None) if object-wide option useu0 is set, u0 to use (if useu0 and useu0 is None, a good value will be computed)
       c= (object-wide default, bool) True/False to override the object-wide setting for whether or not to use the C implementation
       order= (10) number of points to use in the Gauss-Legendre numerical integration of the relevant action, frequency, and angle integrals
       When not using C:
          fixed_quad= (False) if True, use Gaussian quadrature (scipy.integrate.fixed_quad instead of scipy.integrate.quad)
          scipy.integrate.fixed_quad or .quad keywords
    OUTPUT:
        (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
    HISTORY:
       2013-08-28 - Written - Bovy (IAS)
    """
    delta= kwargs.pop('delta',self._delta)
    order= kwargs.get('order',self._order)
    # Use the C implementation when it is enabled (and not explicitly
    # disabled via c=False) and the potential has a C counterpart.
    if ((self._c and not ('c' in kwargs and not kwargs['c']))\
            or (ext_loaded and (('c' in kwargs and kwargs['c'])))) \
            and _check_c(self._pot):
        if len(args) == 5: #R,vR.vT, z, vz pragma: no cover
            raise IOError("Must specify phi")
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
            phi= self._eval_phi
        # The C routine expects arrays; promote scalar inputs.
        if isinstance(R,float):
            R= nu.array([R])
            vR= nu.array([vR])
            vT= nu.array([vT])
            z= nu.array([z])
            vz= nu.array([vz])
            phi= nu.array([phi])
        Lz= R*vT
        if self._useu0:
            #First calculate u0
            if 'u0' in kwargs:
                u0= nu.asarray(kwargs['u0'])
            else:
                E= nu.array([_evaluatePotentials(self._pot,R[ii],z[ii])
                             +vR[ii]**2./2.+vz[ii]**2./2.+vT[ii]**2./2. for ii in range(len(R))])
                u0= actionAngleStaeckel_c.actionAngleStaeckel_calcu0(\
                    E,Lz,self._pot,delta)[0]
            kwargs.pop('u0',None)
        else:
            u0= None
        jr, jz, Omegar, Omegaphi, Omegaz, angler, anglephi,anglez, err= actionAngleStaeckel_c.actionAngleFreqAngleStaeckel_c(\
            self._pot,delta,R,vR,vT,z,vz,phi,u0=u0,order=order)
        # Adjustements for close-to-circular orbits: the C code returns NaN
        # frequencies there, so fall back on the epicycle approximation.
        indx= nu.isnan(Omegar)*(jr < 10.**-3.)+nu.isnan(Omegaz)*(jz < 10.**-3.) #Close-to-circular and close-to-the-plane orbits
        if nu.sum(indx) > 0:
            Omegar[indx]= [epifreq(self._pot,r,use_physical=False) for r in R[indx]]
            Omegaphi[indx]= [omegac(self._pot,r,use_physical=False) for r in R[indx]]
            Omegaz[indx]= [verticalfreq(self._pot,r,use_physical=False) for r in R[indx]]
        if err == 0:
            return (jr,Lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
        else:
            raise RuntimeError("C-code for calculation actions failed; try with c=False") #pragma: no cover
    else: #pragma: no cover
        if 'c' in kwargs and kwargs['c'] and not self._c: #pragma: no cover
            warnings.warn("C module not used because potential does not have a C implementation",galpyWarning)
        raise NotImplementedError("actionsFreqs with c=False not implemented")
|
NAME:
actionsFreqsAngles (_actionsFreqsAngles)
PURPOSE:
evaluate the actions, frequencies, and angles (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
delta= (object-wide default) can be used to override the object-wide focal length; can also be an array with length N to allow different delta for different phase-space points
u0= (None) if object-wide option useu0 is set, u0 to use (if useu0 and useu0 is None, a good value will be computed)
c= (object-wide default, bool) True/False to override the object-wide setting for whether or not to use the C implementation
order= (10) number of points to use in the Gauss-Legendre numerical integration of the relevant action, frequency, and angle integrals
When not using C:
fixed_quad= (False) if True, use Gaussian quadrature (scipy.integrate.fixed_quad instead of scipy.integrate.quad)
scipy.integrate.fixed_quad or .quad keywords
OUTPUT:
(jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
HISTORY:
2013-08-28 - Written - Bovy (IAS)
|
def mask_by_linear_ind(self, linear_inds):
    """Create a new image by zeroing out data at locations not in the
    given indices.

    Parameters
    ----------
    linear_inds : :obj:`numpy.ndarray` of int
        A list of linear coordinates.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type, with data not indexed by inds set
        to zero.
    """
    # Convert flat coordinates to (i, j) pairs, then reuse the 2-D masker.
    ij_coords = self.linear_to_ij(linear_inds)
    return self.mask_by_ind(ij_coords)
|
Create a new image by zeroing out data at locations not in the
given indices.
Parameters
----------
linear_inds : :obj:`numpy.ndarray` of int
A list of linear coordinates.
Returns
-------
:obj:`Image`
A new Image of the same type, with data not indexed by inds set
to zero.
|
def add_comes_from(self, basic_block):
    """Add *basic_block* to ``comes_from`` with set semantics and link back.

    ``self.lock`` guards against re-entrancy: the reciprocal
    ``add_goes_to`` call cannot recurse back into this node while the
    link is being established.
    """
    if basic_block is None or self.lock:
        return
    if basic_block in self.comes_from:
        return  # already linked
    self.lock = True
    self.comes_from.add(basic_block)
    basic_block.add_goes_to(self)
    self.lock = False
|
This simulates a set. Adds the basic_block to the comes_from
list if not done already.
|
def libvlc_video_set_key_input(p_mi, on):
    '''Enable or disable key press events handling, according to the LibVLC hotkeys
    configuration. By default and for historical reasons, keyboard events are
    handled by the LibVLC video widget.
    @note: On X11, there can be only one subscriber for key press and mouse
    click events per window. If your application has subscribed to those events
    for the X window ID of the video widget, then LibVLC will not be able to
    handle key presses and mouse clicks in any case.
    @warning: This function is only implemented for X11 and Win32 at the moment.
    @param p_mi: the media player.
    @param on: true to handle key press events, false to ignore them.
    '''
    # Look up the cached ctypes binding; build it on first use
    # (signature: void fn(libvlc_media_player_t*, unsigned)).
    f = _Cfunctions.get('libvlc_video_set_key_input', None) or \
        _Cfunction('libvlc_video_set_key_input', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, on)
|
Enable or disable key press events handling, according to the LibVLC hotkeys
configuration. By default and for historical reasons, keyboard events are
handled by the LibVLC video widget.
@note: On X11, there can be only one subscriber for key press and mouse
click events per window. If your application has subscribed to those events
for the X window ID of the video widget, then LibVLC will not be able to
handle key presses and mouse clicks in any case.
@warning: This function is only implemented for X11 and Win32 at the moment.
@param p_mi: the media player.
@param on: true to handle key press events, false to ignore them.
|
def extract(self, doc):
    """From the defined JSONPath(s), pull out the values and
    insert them into a document with renamed field(s) then
    apply the Extractor and return the doc with the extracted values """
    # NOTE(review): Python 2 only - relies on basestring, dict.iteritems,
    # itertools.izip and types.ListType; a port to Python 3 would need
    # str, items(), zip() and list.
    if isinstance(self.jsonpaths, JSONPath):
        # Single JSONPath: all matches feed one renamed input field.
        input_field = self.extractor.get_renamed_input_fields()
        if isinstance(self.extractor.get_renamed_input_fields(), list):
            input_field = input_field[0]
        jsonpath = self.jsonpaths
        renamed_inputs = dict()
        if self.flat_map_inputs:
            # Flatten iterable matches (but treat dicts/strings as atoms)
            # into a single stream of values for the input field.
            flat_mapped = itertools.chain.from_iterable(
                [iter(match.value)
                 if hasattr(match.value, '__iter__') and
                 not isinstance(match.value, dict) and
                 not isinstance(match.value, basestring)
                 else iter([match.value])
                 for match in jsonpath.find(doc)])
            renamed_inputs[input_field] = flat_mapped
            if input_field in renamed_inputs:
                self.extract_from_renamed_inputs(doc, renamed_inputs)
        else:
            # One extractor invocation per match value.
            for value in [match.value for match in jsonpath.find(doc)]:
                renamed_inputs[input_field] = value
                self.extract_from_renamed_inputs(doc, renamed_inputs)
    elif isinstance(self.jsonpaths, types.ListType):
        # Multiple JSONPaths, paired positionally with renamed input fields.
        renamed_inputs_lists = dict()
        for jsonpath, renamed_input in \
                itertools.izip(
                    iter(self.jsonpaths),
                    iter(self.extractor.get_renamed_input_fields())):
            renamed_inputs_lists[renamed_input] = [
                match.value for match in jsonpath.find(doc)]
        if self.flat_map_inputs:
            # Flatten each field's matches, then run the extractor once over
            # the combined renamed-inputs document.
            renamed_inputs_tuple_lists = [
                (x, itertools.chain.from_iterable(
                    [iter(z) if hasattr(z, '__iter__') and
                     not isinstance(z, dict) and
                     not isinstance(z, basestring)
                     else iter([z])for z in y]))
                for x, y in renamed_inputs_lists.iteritems()]
            renamed_inputs = reduce(
                ExtractorProcessor.add_tuple_to_doc,
                renamed_inputs_tuple_lists, dict())
            self.extract_from_renamed_inputs(doc, renamed_inputs)
        else:
            # Cartesian product: run the extractor once for every
            # combination of per-field match values.
            renamed_inputs_lists_lists = [[(x, z) for z in y]
                                          for x, y in
                                          renamed_inputs_lists.iteritems()]
            for i in itertools.product(*renamed_inputs_lists_lists):
                renamed_inputs = reduce(
                    ExtractorProcessor.add_tuple_to_doc, i, dict())
                self.extract_from_renamed_inputs(doc, renamed_inputs)
    else:
        raise ValueError("input_fields must be a string or a list")
    return doc
|
From the defined JSONPath(s), pull out the values and
insert them into a document with renamed field(s) then
apply the Extractor and return the doc with the extracted values
|
def finished(experiment_name, reset=True):
    """
    Track a conversion.

    :param experiment_name: Name of the experiment.
    :param reset: If set to `True` current user's session is reset so that they
                  may start the test again in the future. If set to `False` the user
                  will always see the alternative they started with. Defaults to `True`.
    """
    if _exclude_visitor():
        return
    redis = _get_redis_connection()
    try:
        experiment = Experiment.find(redis, experiment_name)
        if not experiment:
            return
        # The alternative this visitor was assigned when the test started.
        alternative_name = _get_session().get(experiment.key)
        if alternative_name:
            split_finished = set(session.get('split_finished', []))
            # Only count the conversion once per session per experiment.
            if experiment.key not in split_finished:
                alternative = Alternative(
                    redis, alternative_name, experiment_name)
                alternative.increment_completion()
            if reset:
                # Forget the assignment so the visitor can be re-bucketed.
                _get_session().pop(experiment.key, None)
                try:
                    split_finished.remove(experiment.key)
                except KeyError:
                    pass
            else:
                split_finished.add(experiment.key)
            session['split_finished'] = list(split_finished)
    except ConnectionError:
        # With SPLIT_DB_FAILOVER enabled, Redis outages are swallowed so
        # A/B bookkeeping cannot take the application down.
        if not current_app.config['SPLIT_DB_FAILOVER']:
            raise
|
Track a conversion.
:param experiment_name: Name of the experiment.
:param reset: If set to `True` current user's session is reset so that they
may start the test again in the future. If set to `False` the user
will always see the alternative they started with. Defaults to `True`.
|
def set_copy_mode(self, use_copy: bool):
    """
    Set all protocols in copy mode. They will return a copy of their protocol.
    This is used for writable mode in CFC.

    :param use_copy: whether protocols should hand out copies of their data
    :return:
    """
    protocols = (proto
                 for group in self.rootItem.children
                 for proto in group.children)
    for proto in protocols:
        proto.copy_data = use_copy
|
Set all protocols in copy mode. They will return a copy of their protocol.
This is used for writable mode in CFC.
:param use_copy:
:return:
|
def stop(self):
    """ Try to gracefully stop the greenlet synchronously
    Stop isn't expected to re-raise greenlet _run exception
    (use self.greenlet.get() for that),
    but it should raise any stop-time exception """
    if self._stop_event.ready():
        return  # already stopping/stopped; stop() is idempotent
    self._stop_event.set()
    self._global_send_event.set()
    # Wake every per-address retrier so it observes the stop event.
    for retrier in self._address_to_retrier.values():
        if retrier:
            retrier.notify()

    self._client.set_presence_state(UserPresence.OFFLINE.value)
    self._client.stop_listener_thread()  # stop sync_thread, wait client's greenlets

    # wait own greenlets, no need to get on them, exceptions should be raised in _run()
    gevent.wait(self.greenlets + [r.greenlet for r in self._address_to_retrier.values()])

    # Ensure keep-alive http connections are closed
    self._client.api.session.close()

    self.log.debug('Matrix stopped', config=self._config)
    # Release the logger; the transport must not be used after stop().
    del self.log
|
Try to gracefully stop the greenlet synchronously
Stop isn't expected to re-raise greenlet _run exception
(use self.greenlet.get() for that),
but it should raise any stop-time exception
|
def parse_args(args=None):
    """
    Parse command-line arguments and return a namespace with parameter
    names matching the arguments.

    :param args: List of strings to be parsed as command-line arguments.
        If None, ``sys.argv`` is read instead.
    :return: a namespace containing the argument values
    """
    parser = argparse.ArgumentParser(description=ds.ARGPARSER['description'])
    add = parser.add_argument
    add('input', help=ds.ARGPARSE_INPUT['help'])
    add('output', nargs='?',
        help=ds.ARGPARSE_OUTPUT['help'],
        default=ds.ARGPARSE_OUTPUT['default'])
    add('-X', '--overwrite', action='store_true',
        help=ds.ARGPARSE_OVERWRITE['help'])
    add('-e', '--extensions', nargs='+',
        default=ds.ARGPARSE_EXTENSION['default'],
        help=ds.ARGPARSE_EXTENSION['help'])
    add('-w', '--wrapper',
        help=ds.ARGPARSE_WRAPPER['help'],
        default=ds.ARGPARSE_WRAPPER['default'])
    add('-v', '--verbose', action='store_true',
        help=ds.ARGPARSE_VERBOSE['help'])
    add('-r', '-R', dest='recursive', action='store_true',
        help=ds.ARGPARSE_RECURSIVE['help'])
    add('--version', action='version', version=ah.__version__)
    # argparse falls back to sys.argv itself when args is None.
    return parser.parse_args(args)
|
Parse arguments provided as a list of strings, and return a namespace
with parameter names matching the arguments
:param args: List of strings to be parsed as command-line arguments. If
none, reads in sys.argv as the values.
:return: a namespace containing arguments values
|
def emit(self, record):
    """Logs a new record

    If a logging view is given, it is used to log the new record to. The code is partially copied from the
    StreamHandler class.

    :param record: the ``logging.LogRecord`` to emit
    :return:
    """
    try:
        # Shorten the source name of the record (remove rafcon.)
        if sys.version_info >= (2, 7):
            record.__setattr__("name", record.name.replace("rafcon.", ""))
        msg = self.format(record)
        fs = "%s"
        try:
            ufs = u'%s'
            try:
                # Python 2: msg may be unicode; fall back to the byte format
                # string if the unicode interpolation cannot encode it.
                entry = ufs % msg
            except UnicodeEncodeError:
                entry = fs % msg
        except UnicodeError:
            entry = fs % msg
        for logging_view in self._logging_views.values():
            logging_view.print_message(entry, record.levelno)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        # Mirrors logging.Handler behaviour: logging errors must never
        # propagate into the application; delegate to handleError().
        self.handleError(record)
|
Logs a new record
If a logging view is given, it is used to log the new record to. The code is partially copied from the
StreamHandler class.
:param record:
:return:
|
def setaty(self, content):
    """
    Grab the (aty) soap-enc:arrayType and attach it to the
    content for proper array processing later in end().

    @param content: The current content being unmarshalled.
    @type content: L{Content}
    @return: self
    @rtype: L{Encoded}
    """
    ns = (None, 'http://schemas.xmlsoap.org/soap/encoding/')
    aty = content.node.get('arrayType', ns)
    if aty is None:
        return self
    content.aty = aty
    parts = aty.split('[')
    if len(parts) == 2:
        # One-dimensional array: apply the element type reference.
        self.applyaty(content, parts[0])
    # Otherwise: a (2) dimensional array; nothing to apply here.
    return self
|
Grab the (aty) soap-enc:arrayType and attach it to the
content for proper array processing later in end().
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: self
@rtype: L{Encoded}
|
def Main(url, similarity_mode="TfIdfCosine", similarity_limit=0.75):
    '''
    Entry Point.

    Args:
        url:                PDF url.
        similarity_mode:    Name of the similarity filter; one of
                            "TfIdfCosine", "Dice", "Jaccard" or "Simpson".
        similarity_limit:   Sentences whose similarity exceeds this value
                            are cut off.
    '''
    # Web scraper configured to read PDF documents.
    web_scrape = WebScraping()
    web_scrape.readable_web_pdf = WebPDFReading()
    document = web_scrape.scrape(url)

    # Dispatch the requested mode onto the matching `Similarity Filter`.
    filter_classes = {
        "TfIdfCosine": TfIdfCosine,   # cosine similarity of Tf-Idf vectors
        "Dice": Dice,                 # Dice coefficient
        "Jaccard": Jaccard,           # Jaccard coefficient
        "Simpson": Simpson,           # Simpson coefficient
    }
    if similarity_mode not in filter_classes:
        raise ValueError()
    similarity_filter = filter_classes[similarity_mode]()

    # Japanese NLP pipeline, tokenized with MeCab.
    nlp_base = NlpBase()
    nlp_base.tokenizable_doc = MeCabTokenizer()
    similarity_filter.nlp_base = nlp_base
    # Sentences more similar than this limit are filtered out.
    similarity_filter.similarity_limit = similarity_limit

    # Summarize the document and print the top three sentences.
    auto_abstractor = AutoAbstractor()
    auto_abstractor.tokenizable_doc = MeCabTokenizer()
    abstractable_doc = TopNRankAbstractor()
    result_dict = auto_abstractor.summarize(document, abstractable_doc, similarity_filter)
    for sentence in result_dict["summarize_result"][:3]:
        print(sentence)
|
Entry Point.
Args:
url: PDF url.
|
def predraw(self):
    """
    Sets up the attributes used by :py:class:`Layer3D()` and calls :py:meth:`Layer3D.predraw()`\ .
    """
    # Cache the view's active camera before the 3D layer renders.
    self.cam = self.view.cam
    super(LayerWorld,self).predraw()
|
Sets up the attributes used by :py:class:`Layer3D()` and calls :py:meth:`Layer3D.predraw()`\ .
|
def ratelimit_remaining(self):
    """Number of API requests remaining before GitHub imposes a ratelimit.

    :returns: int
    """
    data = self._json(self._get(self._github_url + '/rate_limit'), 200)
    self._remaining = data.get('resources', {}).get('core', {}).get('remaining', 0)
    return self._remaining
|
Number of requests before GitHub imposes a ratelimit.
:returns: int
|
def hlen(key, host=None, port=None, db=None, password=None):
    '''
    Returns the number of fields of a hash.

    .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' redis.hlen foo_hash
    '''
    return _connect(host, port, db, password).hlen(key)
|
Returns number of fields of a hash.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' redis.hlen foo_hash
|
def addLineWidget(self, query=None):
    """
    Adds a new line widget to the system with the given values.

    :param query | (<str> term, <str> operator, <str> value) || None
    """
    widget = XQueryLineWidget(self)
    widget.setTerms(sorted(self._rules.keys()))
    widget.setQuery(query)

    # Insert just before the trailing stretch/spacer item.
    layout = self._container.layout()
    layout.insertWidget(layout.count() - 1, widget)

    widget.addRequested.connect(self.addLineWidget)
    widget.removeRequested.connect(self.removeLineWidget)

    # update the remove enabled options for these widgets
    self.updateRemoveEnabled()
|
Adds a new line widget to the system with the given values.
:param query | (<str> term, <str> operator, <str> value) || None
|
def _merge_defaults(self, config):
    """Merge *config* over the packaged defaults and return the result.

    Values are loaded from two sources, with the following precedence:

        1. data/default_config.yaml
        2. The config file itself, passed in to this object in the
           constructor as `path`.

    In case of conflict, the config file dominates.
    """
    defaults_path = resource_filename('osprey', join('data', 'default_config.yaml'))
    with open(defaults_path) as handle:
        defaults = parse(handle)
    return dict_merge(defaults, config)
|
The config object loads its values from two sources, with the
following precedence:
1. data/default_config.yaml
2. The config file itself, passed in to this object in the
constructor as `path`.
in case of conflict, the config file dominates.
|
def create_vlan(self, id_vlan):
    """ Set column 'ativada = 1' for the given VLAN.

    :param id_vlan: VLAN identifier.
    :return: None
    """
    payload = {'vlan': {'vlan_id': id_vlan}}
    code, xml = self.submit(payload, 'PUT', 'vlan/create/')
    return self.response(code, xml)
|
Set column 'ativada = 1'.
:param id_vlan: VLAN identifier.
:return: None
|
def fetch_chunk_data(self):
    """Fetch data between ``start_date`` and ``end_date`` in 6-month chunks.

    If the period of time between start and end spans more than six months,
    the request is split into consecutive 6-month date ranges and the
    partial results are concatenated into one list.

    :return: list of fetched records
    """
    data = []
    delta = relativedelta(self.end_date, self.start_date)
    # Fix: the original used only `delta.months`, silently dropping the
    # `years` component for spans longer than a year.
    total_months = delta.years * 12 + delta.months
    # Fix: integer division — `/` yields a float on Python 3 and range()
    # would raise TypeError.
    counter = (total_months // 6) + 1
    months = 0
    for _chunk in range(counter):
        chunk_start_date = self.start_date + relativedelta(months=months)
        chunk_end_date = self.start_date + relativedelta(months=months + 6)
        months += 6
        # Clamp the final chunk to the requested end date.
        if chunk_end_date > self.end_date:
            chunk_end_date = self.end_date
        data = data + self.request.send(self.symbol, chunk_start_date, chunk_end_date)
    return data
|
If the period of time between start and end is bigger than one year,
we have to create and fetch chunked date ranges (6-month chunks).
|
def _setup_serializers(self):
"""
Auto set the return serializer based on Accept headers
http://docs.webob.org/en/latest/reference.html#header-getters
Intersection of requested types and supported types tells us if we
can in fact respond in one of the request formats
"""
acceptable_offers = self.request.accept.acceptable_offers(self.response.supported_mime_types)
if len(acceptable_offers) > 0:
best_accept_match = acceptable_offers[0][0]
else:
best_accept_match = self.response.default_serializer.content_type()
# best_accept_match = self.request.accept.best_match(
# self.response.supported_mime_types,
# default_match=self.response.default_serializer.content_type()
# )
self.logger.info("%s determined as best match for accept header: %s" % (
best_accept_match,
self.request.accept
))
# if content_type is not acceptable it will raise UnsupportedVocabulary
self.response.content_type = best_accept_match
|
Auto set the return serializer based on Accept headers
http://docs.webob.org/en/latest/reference.html#header-getters
Intersection of requested types and supported types tells us if we
can in fact respond in one of the request formats
|
def list_dir(self):
    """
    Non-recursive file listing.

    :returns: A generator over the ``gs://`` URLs of files directly under
        this "directory" (lazy for efficiency).
    """
    prefix = self.blob.name
    if not prefix.endswith('/'):
        prefix += '/'
    for item in self.blob.bucket.list_blobs(prefix=prefix, delimiter='/'):
        yield 'gs://{}/{}'.format(item.bucket.name, item.name)
|
Non-recursive file listing.
:returns: A generator over files in this "directory" for efficiency.
|
def pack(header, s):
    """Pack a string into MXImageRecord.

    Parameters
    ----------
    header : IRHeader
        Header of the image record.
        ``header.label`` can be a number or an array. See more detail in ``IRHeader``.
    s : str
        Raw image string to be packed.

    Returns
    -------
    s : str
        The packed string.

    Examples
    --------
    >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
    >>> id = 2574
    >>> header = mx.recordio.IRHeader(0, label, id, 0)
    >>> with open(path, 'r') as file:
    ...     s = file.read()
    >>> packed_s = mx.recordio.pack(header, s)
    """
    header = IRHeader(*header)
    if isinstance(header.label, numbers.Number):
        # Scalar label: flag 0 means the label lives in the header itself.
        header = header._replace(flag=0)
    else:
        # Array label: flag carries its length, the float32 bytes are
        # prepended to the payload, and the header label slot is zeroed.
        label = np.asarray(header.label, dtype=np.float32)
        header = header._replace(flag=label.size, label=0)
        s = label.tostring() + s
    # Fixed-size binary header followed by the (possibly label-prefixed) payload.
    s = struct.pack(_IR_FORMAT, *header) + s
    return s
|
Pack a string into MXImageRecord.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
s : str
Raw image string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
... s = file.read()
>>> packed_s = mx.recordio.pack(header, s)
|
def clear_learning_objectives(self):
    """Clears the learning objectives.

    raise: NoAccess - ``Metadata.isRequired()`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.ActivityForm.clear_assets_template
    metadata = self.get_learning_objectives_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['learningObjectiveIds'] = self._learning_objectives_default
|
Clears the learning objectives.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
|
def edges_unique(self):
    """
    The unique edges of the mesh.

    Returns
    ----------
    edges_unique : (n, 2) int
        Vertex indices for unique edges
    """
    unique_idx, inverse = grouping.unique_rows(self.edges_sorted)
    result = self.edges_sorted[unique_idx]
    # `edges_unique` itself is cached by the decorator; the side products
    # generated here must be stored in the cache manually.
    self._cache['edges_unique_idx'] = unique_idx
    self._cache['edges_unique_inverse'] = inverse
    return result
|
The unique edges of the mesh.
Returns
----------
edges_unique : (n, 2) int
Vertex indices for unique edges
|
def get_changes(self, extracted_name, similar=False, global_=False):
    """Get the changes this refactoring makes.

    :parameters:
        - `similar`: if `True`, similar expressions/statements are also
          replaced.
        - `global_`: if `True`, the extracted method/variable will
          be global.
    """
    info = _ExtractInfo(
        self.project, self.resource, self.start_offset, self.end_offset,
        extracted_name, variable=(self.kind == 'variable'),
        similar=similar, make_global=global_)
    new_contents = _ExtractPerformer(info).extract()
    changeset = ChangeSet('Extract %s <%s>' % (self.kind, extracted_name))
    changeset.add_change(ChangeContents(self.resource, new_contents))
    return changeset
|
Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global.
|
def schedule(self, task: Schedulable, *args, **kwargs):
    """Add a job to be executed ASAP to the batch.

    :arg task: the task or its name to execute in the background
    :arg args: args to be passed to the task function
    :arg kwargs: kwargs to be passed to the task function
    """
    # "ASAP" is simply "scheduled for right now" in UTC.
    self.schedule_at(task, datetime.now(timezone.utc), *args, **kwargs)
|
Add a job to be executed ASAP to the batch.
:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.