code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def readColorLUT(infile, distance_modulus, mag_1, mag_2, mag_err_1, mag_err_2):
    """
    Take in a color look-up table and return the signal color evaluated for each object.
    Consider making the argument a Catalog object rather than magnitudes and uncertainties.

    :param infile: FITS file containing the color look-up table.
    :param distance_modulus: distance modulus at which to evaluate; must match
        one of the moduli stored in the file to within 1e-3.
    :param mag_1: magnitudes in the first band (array-like).
    :param mag_2: magnitudes in the second band (array-like).
    :param mag_err_1: magnitude uncertainties for band 1 (array-like).
    :param mag_err_2: magnitude uncertainties for band 2 (array-like).
    :return: array of signal color values, or ``False`` when the requested
        distance modulus is not present in the file.
    """
    reader = pyfits.open(infile)
    distance_modulus_array = reader['DISTANCE_MODULUS'].data.field('DISTANCE_MODULUS')
    # Bail out early (returning False, not an array) if the requested modulus
    # is not stored in the file.
    if not numpy.any(numpy.fabs(distance_modulus_array - distance_modulus) < 1.e-3):
        logger.warning("Distance modulus %.2f not available in file %s"%(distance_modulus, infile))
        logger.warning(' available distance moduli:'+str(distance_modulus_array))
        return False
    # HDU name of the closest stored distance modulus, e.g. '17.50'.
    distance_modulus_key = '%.2f'%(distance_modulus_array[numpy.argmin(numpy.fabs(distance_modulus_array - distance_modulus))])
    bins_mag_err = reader['BINS_MAG_ERR'].data.field('BINS_MAG_ERR')
    bins_mag_1 = reader['BINS_MAG_1'].data.field('BINS_MAG_1')
    bins_mag_2 = reader['BINS_MAG_2'].data.field('BINS_MAG_2')
    # Note that magnitude uncertainty is always assigned by rounding up, is this the right thing to do?
    # Map every object's uncertainties onto error-bin indices, clipped into range.
    index_mag_err_1 = numpy.clip(numpy.digitize(mag_err_1, bins_mag_err) - 1,
                                 0, len(bins_mag_err) - 2)
    index_mag_err_2 = numpy.clip(numpy.digitize(mag_err_2, bins_mag_err) - 1,
                                 0, len(bins_mag_err) - 2)
    u_color = numpy.zeros(len(mag_1))
    # One 2D histogram per (err-bin-1, err-bin-2) pair; evaluate it only for
    # the objects falling in that pair.
    for index_mag_err_1_select in range(0, len(bins_mag_err) - 1):
        for index_mag_err_2_select in range(0, len(bins_mag_err) - 1):
            cut = numpy.logical_and(index_mag_err_1 == index_mag_err_1_select,
                                    index_mag_err_2 == index_mag_err_2_select)
            if numpy.sum(cut) < 1:
                continue
            # Column name encodes the two error-bin indices, e.g. '23'.
            histo = reader[distance_modulus_key].data.field('%i%i'%(index_mag_err_1_select, index_mag_err_2_select))
            u_color[cut] = ugali.utils.binning.take2D(histo,
                                                      mag_2[cut], mag_1[cut],
                                                      bins_mag_2, bins_mag_1)
    reader.close()
    return u_color
|
Take in a color look-up table and return the signal color evaluated for each object.
Consider making the argument a Catalog object rather than magnitudes and uncertainties.
|
def Group(params, name=None, type=None):
    """Build a 'Group' element collecting Params for the 'What' section.

    Args:
        params(list of :func:`Param`): Parameter elements placed in the group.
        name(str): Optional group name; ``None`` is fine when the group is
            identified by its type instead.
        type(str): Optional group type, e.g. 'complex' (real + imaginary).
    """
    attributes = {}
    if name:
        attributes['name'] = name
    if type:
        attributes['type'] = type
    group = objectify.Element('Group', attrib=attributes)
    for param in params:
        group.append(param)
    return group
|
Groups together Params for adding under the 'What' section.
Args:
params(list of :func:`Param`): Parameter elements to go in this group.
name(str): Group name. NB ``None`` is valid, since the group may be
best identified by its type.
type(str): Type of group, e.g. 'complex' (for real and imaginary).
|
def opendocx(file):
    '''Open a .docx archive and return its main document as an XML tree.'''
    archive = zipfile.ZipFile(file)
    xml_content = archive.read('word/document.xml')
    return etree.fromstring(xml_content)
|
Open a docx file, return a document XML tree
|
def new_from_list(cls, content, fill_title=True, **kwargs):
    """Build a Table populated from a list of row tuples.

    Args:
        content (list): list of tuples of strings; each tuple is one row.
        fill_title (bool): when True, the first tuple becomes the title row.
    """
    table = cls(**kwargs)
    table.append_from_list(content, fill_title)
    return table
|
Populates the Table with a list of tuples of strings.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title
|
def block(self, event, emptyEvents = ()):
    '''
    Put a recently popped event back and block all later events until unblock.
    Only the sub-queue directly containing the event is blocked; other queues
    keep processing. block and unblock must be called on the same queue for
    any given event.
    :param event: the returned event; it will be popped again once unblocked.
    :param emptyEvents: reactivate the QueueIsEmptyEvents
    '''
    owner_queue = self.tree.matchfirst(event)
    owner_queue.block(event)
    self.blockEvents[event] = owner_queue
    for empty_event in emptyEvents:
        empty_event.queue.waitForEmpty()
|
Return a recently popped event to queue, and block all later events until unblock.
Only the sub-queue directly containing the event is blocked, so events in other queues may still be processed.
It is illegal to call block and unblock in different queues with a same event.
:param event: the returned event. When the queue is unblocked later, this event will be popped again.
:param emptyEvents: reactivate the QueueIsEmptyEvents
|
def get_interpolated_data(df: pd.DataFrame, e_min=np.nan, e_max=np.nan, e_step=np.nan):
    """return the interpolated x and y axis for the given x range [e_min, e_max] with step defined
    :param df: input data frame
    :type df: pandas.DataFrame
    :param e_min: left energy range in eV of new interpolated data
    :type e_min: float
    :param e_max: right energy range in eV of new interpolated data
    :type e_max: float
    :param e_step: energy step in eV for interpolation
    :type e_step: float
    :return: x_axis and y_axis of interpolated data over specified range
    :rtype: dict
    """
    # int() truncates, so float error such as (0.3 - 0.0)/0.1 == 2.999...
    # silently dropped the last point; round to the nearest integer instead.
    nbr_point = int(round((e_max - e_min) / e_step)) + 1
    x_axis = np.linspace(e_min, e_max, nbr_point).round(6)
    y_axis_function = interp1d(x=df['E_eV'], y=df['Sig_b'], kind='linear')
    y_axis = y_axis_function(x_axis)
    return {'x_axis': x_axis, 'y_axis': y_axis}
|
return the interpolated x and y axis for the given x range [e_min, e_max] with step defined
:param df: input data frame
:type df: pandas.DataFrame
:param e_min: left energy range in eV of new interpolated data
:type e_min: float
:param e_max: right energy range in eV of new interpolated data
:type e_max: float
:param e_step: energy step in eV for interpolation
:type e_step: float
:return: x_axis and y_axis of interpolated data over specified range
:rtype: dict
|
def choose_language(self, lang, request):
    """Resolve which language to serve, handling the various corner cases."""
    # lang may be None or an empty string; fall back to the request's language.
    if not lang:
        lang = get_language_from_request(request)
    # Unknown languages are a 404, not a fallback.
    valid_codes = [code for code, _label in settings.PAGE_LANGUAGES]
    if lang not in valid_codes:
        raise Http404
    # Make django gettext serve translations in the same language.
    if lang and translation.check_for_language(lang):
        translation.activate(lang)
    return lang
|
Deal with the multiple corner case of choosing the language.
|
def _apply_shadow_vars(avg_grads):
    """
    Create shadow variables on the parameter server and substitute them for
    the tower-local variables in avg_grads.
    Args:
        avg_grads: list of (grad, var) tuples
    """
    shadow_grads = []
    for grad, var in avg_grads:
        assert var.name.startswith('tower'), var.name
        # Strip the leading 'towerN' scope to get the canonical variable name.
        stripped_name = '/'.join(var.name.split('/')[1:])
        stripped_name = get_op_tensor_name(stripped_name)[0]
        shadow = tf.get_variable(stripped_name, dtype=var.dtype.base_dtype,
                                 initializer=var.initial_value,
                                 trainable=True)
        # (g, v) to be applied, where v is global (ps vars)
        shadow_grads.append((grad, shadow))
    return shadow_grads
|
Create shadow variables on PS, and replace variables in avg_grads
by these shadow variables.
Args:
avg_grads: list of (grad, var) tuples
|
async def reinvoke(self, *, call_hooks=False, restart=True):
    """|coro|
    Calls the command again.
    This is similar to :meth:`~.Context.invoke` except that it bypasses
    checks, cooldowns, and error handlers.
    .. note::
        If you want to bypass :exc:`.UserInputError` derived exceptions,
        it is recommended to use the regular :meth:`~.Context.invoke`
        as it will work more naturally. After all, this will end up
        using the old arguments the user has used and will thus just
        fail again.
    Parameters
    ------------
    call_hooks: :class:`bool`
        Whether to call the before and after invoke hooks.
    restart: :class:`bool`
        Whether to start the call chain from the very beginning
        or where we left off (i.e. the command that caused the error).
        The default is to start where we left off.
    """
    cmd = self.command
    view = self.view
    if cmd is None:
        raise ValueError('This context is not valid.')
    # some state to revert to when we're done
    index, previous = view.index, view.previous
    invoked_with = self.invoked_with
    invoked_subcommand = self.invoked_subcommand
    subcommand_passed = self.subcommand_passed
    if restart:
        # Rewind the string view to just after the prefix and re-read the
        # root command's word so parsing begins from the top of the chain.
        to_call = cmd.root_parent or cmd
        view.index = len(self.prefix)
        view.previous = 0
        view.get_word()  # advance to get the root command
    else:
        to_call = cmd
    try:
        await to_call.reinvoke(self, call_hooks=call_hooks)
    finally:
        # Restore the saved parser/context state even if reinvoke raised,
        # so the context remains usable afterwards.
        self.command = cmd
        view.index = index
        view.previous = previous
        self.invoked_with = invoked_with
        self.invoked_subcommand = invoked_subcommand
        self.subcommand_passed = subcommand_passed
|
|coro|
Calls the command again.
This is similar to :meth:`~.Context.invoke` except that it bypasses
checks, cooldowns, and error handlers.
.. note::
If you want to bypass :exc:`.UserInputError` derived exceptions,
it is recommended to use the regular :meth:`~.Context.invoke`
as it will work more naturally. After all, this will end up
using the old arguments the user has used and will thus just
fail again.
Parameters
------------
call_hooks: :class:`bool`
Whether to call the before and after invoke hooks.
restart: :class:`bool`
Whether to start the call chain from the very beginning
or where we left off (i.e. the command that caused the error).
The default is to start where we left off.
|
def install_json_params(self, ij=None):
    """Return install.json params as a dict keyed by each param's name.

    Args:
        ij (dict, optional): Defaults to None. The install.json contents;
            when provided, the cached mapping is rebuilt from it.
    Returns:
        dict: install.json input params keyed by name.
    """
    # Rebuild when nothing is cached yet or an explicit document was passed.
    if self._install_json_params is None or ij is not None:
        # TODO: support for projects with multiple install.json files is not supported
        source = ij if ij is not None else self.install_json
        by_name = {}
        for param in source.get('params') or []:
            by_name.setdefault(param.get('name'), param)
        self._install_json_params = by_name
    return self._install_json_params
|
Return install.json params in a dict with name param as key.
Args:
ij (dict, optional): Defaults to None. The install.json contents.
Returns:
dict: A dictionary containing the install.json input params with name as key.
|
def authenticate(self, driver):
    """Authenticate using the SSH protocol specific FSM.

    Drives a small finite-state machine over the pexpect session until the
    device prompt is reached (success) or authentication fails/times out.
    :param driver: platform driver supplying the prompt/password regexes.
    :return: result of ``fsm.run()``.
    """
    # Expected patterns; the position comments below label the FSM states.
    # 0 1 2 3
    events = [driver.press_return_re, driver.password_re, self.device.prompt_re, pexpect.TIMEOUT]
    # NOTE(review): each transition appears to be
    # (event, [states it applies in], next state, action, timeout) with -1
    # terminating the FSM — confirm against the FSM class documentation.
    transitions = [
        (driver.press_return_re, [0, 1], 1, partial(a_send, "\r\n"), 10),
        # First password prompt: send the password, then wait for a prompt.
        (driver.password_re, [0], 1, partial(a_send_password, self._acquire_password()),
         _C['first_prompt_timeout']),
        # A second password prompt means the password was rejected.
        (driver.password_re, [1], -1, a_authentication_error, 0),
        (self.device.prompt_re, [0, 1], -1, None, 0),
        # Timeout: fatal on the target device, otherwise nudge with a newline.
        (pexpect.TIMEOUT, [1], -1,
         ConnectionError("Error getting device prompt") if self.device.is_target else partial(a_send, "\r\n"), 0)
    ]
    self.log("EXPECTED_PROMPT={}".format(pattern_to_str(self.device.prompt_re)))
    fsm = FSM("SSH-AUTH", self.device, events, transitions, init_pattern=self.last_pattern, timeout=30)
    return fsm.run()
|
Authenticate using the SSH protocol specific FSM.
|
def ensure_local_repo(self):
    """Clone this repo's remote into Dusty's local repos directory,
    unless a local copy already exists."""
    if os.path.exists(self.managed_path):
        logging.debug('Repo {} already exists'.format(self.remote_path))
        return
    logging.info('Initiating clone of local repo {}'.format(self.remote_path))
    # Make sure the parent directory exists before cloning into it.
    parent = parent_dir(self.managed_path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    with git_error_handling():
        git.Repo.clone_from(self.assemble_remote_path(), self.managed_path)
|
Given a Dusty repo object, clone the remote into Dusty's local repos
directory if it does not already exist.
|
def _dict_merge(dct, merge_dct):
"""Recursive dict merge.
Inspired by :meth:``dict.update()``, instead of updating only top-level
keys, dict_merge recurses down into dicts nested to an arbitrary depth,
updating keys. The ``merge_dct`` is merged into ``dct``.
From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
Arguments:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct
"""
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.Mapping)):
_dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
|
Recursive dict merge.
Inspired by :meth:``dict.update()``, instead of updating only top-level
keys, dict_merge recurses down into dicts nested to an arbitrary depth,
updating keys. The ``merge_dct`` is merged into ``dct``.
From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
Arguments:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct
|
def shell(name=None, **attrs):
    """Create a new :class:`Shell` with a function as callback.

    Behaves exactly like :func:`command`, except that the `cls`
    parameter defaults to :class:`Shell`.
    """
    if 'cls' not in attrs:
        attrs['cls'] = Shell
    return click.command(name, **attrs)
|
Creates a new :class:`Shell` with a function as callback. This
works otherwise the same as :func:`command` just that the `cls`
parameter is set to :class:`Shell`.
|
def register_monitors(self, *monitors):
    """
    Register monitors given as (name, Theano variable) tuples.
    Names already registered are skipped.
    """
    for name, node in monitors:
        if name in self._registered_monitors:
            continue
        node = node * 1.0  # Avoid CudaNdarray
        entry = (name, node)
        self.training_monitors.append(entry)
        self.testing_monitors.append(entry)
        self._registered_monitors.add(name)
|
Register monitors they should be tuple of name and Theano variable.
|
def generate(self, state, size, dataset, backward=False):
    """Yield node values by repeatedly following random links.

    Parameters
    ----------
    state : `str` or `iterable` of `str`
        Initial state.
    size : `int`
        State size.
    dataset : `str`
        Dataset key.
    backward : `bool`, optional
        Link direction.
    Returns
    -------
    `generator` of `str`
        Node value generator.
    """
    if isinstance(state, str):
        state = self.split_state(state)
    state = self.get_state(state, size)
    dataset = self.get_dataset(dataset)
    while True:
        link, state = self.random_link(dataset, state, backward)
        # No link terminates the walk; in backward mode an empty-string
        # link is also a terminator.
        if link is None:
            return
        if backward and link == '':
            return
        yield link
|
Generate a sequence.
Parameters
----------
state : `str` or `iterable` of `str`
Initial state.
size : `int`
State size.
dataset : `str`
Dataset key.
backward : `bool`, optional
Link direction.
Returns
-------
`generator` of `str`
Node value generator.
|
def mutator(*cache_names):
    """Decorator factory for ``Document`` methods that change the document.

    After the wrapped method runs (even if it raises), each named cache
    attribute is reset to ``None`` so stale cached values are never observed.
    """
    def decorator(method):
        @wraps(method)
        def wrapped(self, *args, **kwargs):
            try:
                return method(self, *args, **kwargs)
            finally:
                # Invalidate every listed cache regardless of outcome.
                for name in cache_names:
                    setattr(self, name, None)
        return wrapped
    return decorator
|
Decorator for ``Document`` methods that change the document.
This decorator ensures that the object's caches are kept in sync
when changes are made.
|
def delete_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False):
    """Delete the DCNM OUT partition and update the result.

    :param tenant_id: tenant whose OUT partition should be removed.
    :param fw_dict: firewall attributes; 'tenant_name' is read from it.
    :param is_fw_virt: unused here; kept for interface compatibility.
    :return: True on success, False when the delete raised.
    """
    res = fw_const.DCNM_OUT_PART_DEL_SUCCESS
    tenant_name = fw_dict.get('tenant_name')
    ret = True
    try:
        self._delete_partition(tenant_id, tenant_name)
        # Only claim success when the delete actually went through; the
        # previous version logged "deleted" even after a failure.
        LOG.info("Out partition deleted")
    except Exception as exc:
        LOG.error("deletion of Out Partition failed for tenant "
                  "%(tenant)s, Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        res = fw_const.DCNM_OUT_PART_DEL_FAIL
        ret = False
    self.update_fw_db_result(tenant_id, dcnm_status=res)
    return ret
|
Delete the DCNM OUT partition and update the result.
|
def _init(self, clnt):
    '''Initialize this API wrapper from a YunpianClient.'''
    assert clnt, "clnt is None"
    self._clnt = clnt
    self._apikey = clnt.apikey()
    self._version = clnt.conf(YP_VERSION, defval=VERSION_V2)
    self._charset = clnt.conf(HTTP_CHARSET, defval=CHARSET_UTF8)
    # The API name is the last component of the concrete module's path.
    module_path = self.__class__.__module__
    self._name = module_path.split('.')[-1]
|
initialize api by YunpianClient
|
def anonymize_user(doc):
    """Preprocess an event by anonymizing user information.
    The anonymization is done by removing fields that can uniquely identify a
    user, such as the user's ID, session ID, IP address and User Agent, and
    hashing them to produce a ``visitor_id`` and ``unique_session_id``. To
    further secure the method, a randomly generated 32-byte salt is used, that
    expires after 24 hours and is discarded. The salt values are stored in
    Redis (or whichever backend Invenio-Cache uses). The ``unique_session_id``
    is calculated in the same way as the ``visitor_id``, with the only
    difference that it also takes into account the hour of the event . All of
    these rules effectively mean that a user can have a unique ``visitor_id``
    for each day and unique ``unique_session_id`` for each hour of a day.
    This session ID generation process was designed according to the `Project
    COUNTER Code of Practice <https://www.projectcounter.org/code-of-
    practice-sections/general-information/>`_.
    In addition to that the country of the user is extracted from the IP
    address as a ISO 3166-1 alpha-2 two-letter country code (e.g. "CH" for
    Switzerland).
    """
    # Identifying fields are *removed* from the doc; only derived values
    # (country, hashed IDs) are written back.
    ip = doc.pop('ip_address', None)
    if ip:
        doc.update({'country': get_geoip(ip)})
    user_id = doc.pop('user_id', '')
    session_id = doc.pop('session_id', '')
    user_agent = doc.pop('user_agent', '')
    # A 'User Session' is defined as activity by a user in a period of
    # one hour. timeslice represents the hour of the day in which
    # the event has been generated and together with user info it determines
    # the 'User Session'
    timestamp = arrow.get(doc.get('timestamp'))
    timeslice = timestamp.strftime('%Y%m%d%H')
    salt = get_anonymization_salt(timestamp)
    # visitor_id: salted hash of the strongest available identifier, in
    # priority order user_id > session_id > (ip + user_agent).
    visitor_id = hashlib.sha224(salt.encode('utf-8'))
    # TODO: include random salt here, that changes once a day.
    # m.update(random_salt)
    if user_id:
        visitor_id.update(user_id.encode('utf-8'))
    elif session_id:
        visitor_id.update(session_id.encode('utf-8'))
    elif ip and user_agent:
        # Only this fallback branch mixes in the timeslice for visitor_id.
        vid = '{}|{}|{}'.format(ip, user_agent, timeslice)
        visitor_id.update(vid.encode('utf-8'))
    else:
        # TODO: add random data?
        pass
    # unique_session_id: same identifier priority, but always salted with
    # the hourly timeslice so it rotates every hour.
    unique_session_id = hashlib.sha224(salt.encode('utf-8'))
    if user_id:
        sid = '{}|{}'.format(user_id, timeslice)
        unique_session_id.update(sid.encode('utf-8'))
    elif session_id:
        sid = '{}|{}'.format(session_id, timeslice)
        unique_session_id.update(sid.encode('utf-8'))
    elif ip and user_agent:
        sid = '{}|{}|{}'.format(ip, user_agent, timeslice)
        unique_session_id.update(sid.encode('utf-8'))
    doc.update(dict(
        visitor_id=visitor_id.hexdigest(),
        unique_session_id=unique_session_id.hexdigest()
    ))
    return doc
|
Preprocess an event by anonymizing user information.
The anonymization is done by removing fields that can uniquely identify a
user, such as the user's ID, session ID, IP address and User Agent, and
hashing them to produce a ``visitor_id`` and ``unique_session_id``. To
further secure the method, a randomly generated 32-byte salt is used, that
expires after 24 hours and is discarded. The salt values are stored in
Redis (or whichever backend Invenio-Cache uses). The ``unique_session_id``
is calculated in the same way as the ``visitor_id``, with the only
difference that it also takes into account the hour of the event . All of
these rules effectively mean that a user can have a unique ``visitor_id``
for each day and unique ``unique_session_id`` for each hour of a day.
This session ID generation process was designed according to the `Project
COUNTER Code of Practice <https://www.projectcounter.org/code-of-
practice-sections/general-information/>`_.
In addition to that the country of the user is extracted from the IP
address as a ISO 3166-1 alpha-2 two-letter country code (e.g. "CH" for
Switzerland).
|
def closest(self, dt1, dt2, *dts):
    """
    Get the closest date to the instance.

    (The previous docstring said "farthest", but the implementation takes
    the minimum absolute difference, i.e. the closest date. It also placed
    an unused ``from functools import reduce`` before the string, which
    prevented it from being the actual docstring.)

    :type dt1: datetime.datetime
    :type dt2: datetime.datetime
    :type dts: list[datetime.datetime,]
    :rtype: DateTime
    """
    candidates = [pendulum.instance(dt) for dt in (dt1, dt2) + dts]
    # Keep the (delta, dt) tuple form so ties on the delta fall back to
    # comparing the dates themselves, exactly as before.
    keyed = [(abs(self - dt), dt) for dt in candidates]
    return min(keyed)[1]
|
Get the farthest date from the instance.
:type dt1: datetime.datetime
:type dt2: datetime.datetime
:type dts: list[datetime.datetime,]
:rtype: DateTime
|
async def start_pipe_server(
    client_connected_cb,
    *,
    path,
    loop=None,
    limit=DEFAULT_LIMIT
):
    """
    Start listening for connection using Windows named pipes.

    :param client_connected_cb: callback passed to StreamReaderProtocol;
        invoked for each new client connection.
    :param path: pipe path; forward slashes are normalized to backslashes.
    :param loop: event loop to use. NOTE(review): ``start_serving_pipe`` is
        a ProactorEventLoop (Windows) API — confirm callers only run this
        on Windows.
    :param limit: StreamReader buffer limit.
    :return: the pipe server, augmented with a ``wait_closed`` coroutine.
    """
    path = path.replace('/', '\\')
    loop = loop or asyncio.get_event_loop()
    def factory():
        # One reader/protocol pair per incoming connection.
        reader = asyncio.StreamReader(limit=limit, loop=loop)
        protocol = asyncio.StreamReaderProtocol(
            reader,
            client_connected_cb,
            loop=loop,
        )
        return protocol
    server, *_ = await loop.start_serving_pipe(factory, address=path)
    # The returned instance sadly doesn't have a `wait_closed` method so we add
    # one.
    closed = asyncio.Event(loop=loop)
    original_close = server.close
    def close():
        # Wrap close() so wait_closed() wakes up once the server is closed.
        original_close()
        closed.set()
    server.close = close
    server.wait_closed = closed.wait
    return server
|
Start listening for connection using Windows named pipes.
|
def to_pytime(self):
    """
    Convert this SQL time value into a naive ``datetime.time``.
    Nanosecond precision is truncated to microseconds.
    @return: naive time
    """
    total_ns = self._nsec
    hours, remainder = divmod(total_ns, 3600 * 10**9)
    minutes, remainder = divmod(remainder, 60 * 10**9)
    seconds, leftover_ns = divmod(remainder, 10**9)
    return datetime.time(hours, minutes, seconds, leftover_ns // 1000)
|
Converts sql time object into Python's time object
this will truncate nanoseconds to microseconds
@return: naive time
|
def make_wrapper(self, callable_):
    """Wrap a free-standing function so that every call is routed through
    'call_and_report_errors', which reports all exceptions."""
    assert callable(callable_)
    def _forward(*args, **kw):
        return self.call_and_report_errors(callable_, *args, **kw)
    return _forward
|
Given a free-standing function 'callable', return a new
callable that will call 'callable' and report all exceptins,
using 'call_and_report_errors'.
|
def identify_hosting_service(repo_url, hosting_services=HOSTING_SERVICES):
    """
    Determines the hosting service of `repo_url` by substring match.
    :param repo_url: Repo URL of unknown type.
    :returns: Hosting service or raises UnknownHostingService exception.
    """
    # `unicode` only exists on Python 2 and raises NameError on Python 3;
    # `str` is the text type there and behaves the same for this purpose.
    repo_url = str(repo_url)
    for service in hosting_services:
        if service in repo_url:
            return service
    raise UnknownHostingService
|
Determines the hosting service of `repo_url`.
:param repo_url: Repo URL of unknown type.
:returns: Hosting service or raises UnknownHostingService exception.
|
def series2cat(df:DataFrame, *col_names):
    "Convert the columns `col_names` of `df` to ordered categorical dtype in place."
    for col in listify(col_names):
        df[col] = df[col].astype('category').cat.as_ordered()
|
Categorifies the columns `col_names` in `df`.
|
def get_folder_id(folder_name, auth, url):
    """
    Helper function takes str input of folder name and returns the numerical id of the folder.
    :param folder_name: str name of the folder
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: int numerical id of the folder, or the sentinel string
        "Folder not found" when no configuration template matches
        ``folder_name`` (callers must handle both types).
    :rtype: int or str
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.icc import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> default_folder_id = get_folder_id('Default Folder', auth.creds, auth.url)
    >>> assert type(default_folder_id) is int
    """
    object_list = get_cfg_template(auth=auth, url=url)
    # Linear scan over templates; folders are matched by their file name.
    for template in object_list:
        if template['confFileName'] == folder_name:
            return int(template['confFileId'])
    return "Folder not found"
|
Helper function takes str input of folder name and returns str numerical id of the folder.
:param folder_name: str name of the folder
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: str numerical id of the folder
:rtype: str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.icc import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> default_folder_id = get_folder_id('Default Folder', auth.creds, auth.url)
>>> assert type(default_folder_id) is int
|
def hex_digit(coord, digit=1):
    """
    Returns either the first or second digit of the hexadecimal representation of the given coordinate.

    The coordinate is zero-padded to two hex digits, so single-digit values
    (e.g. 0xA) have a well-defined first digit of 0 instead of raising
    IndexError as the previous slicing did; values wider than two digits
    behave exactly as before.
    :param coord: hexadecimal coordinate, int
    :param digit: 1 or 2, meaning either the first or second digit of the hexadecimal
    :return: int, either the first or second digit
    """
    if digit not in (1, 2):
        raise ValueError('hex_digit can only get the first or second digit of a hex number, was passed digit={}'.format(
            digit
        ))
    return int(format(coord, '02x')[digit - 1], 16)
|
Returns either the first or second digit of the hexadecimal representation of the given coordinate.
:param coord: hexadecimal coordinate, int
:param digit: 1 or 2, meaning either the first or second digit of the hexadecimal
:return: int, either the first or second digit
|
def delete_country_by_id(cls, country_id, **kwargs):
    """Delete an instance of Country by its ID.

    The request is synchronous by default; pass ``async=True`` (via kwargs)
    to get a request thread back instead:
    >>> thread = api.delete_country_by_id(country_id, async=True)
    >>> result = thread.get()
    :param str country_id: ID of country to delete. (required)
    :return: None (or the request thread when called asynchronously).
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original simply returned the helper's result:
    # a request thread when 'async' is set, otherwise the response data.
    outcome = cls._delete_country_by_id_with_http_info(country_id, **kwargs)
    return outcome
|
Delete Country
Delete an instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
|
def search_full(self, regex, return_string=True, advance_pointer=True):
    """
    Search from the current position.
    If `return_string` is false and a match is found, returns the number of
    characters matched (from the current position *up to* the end of the
    match).
    >>> s = Scanner("test string")
    >>> s.search_full(r' ')
    'test '
    >>> s.pos
    5
    >>> s.search_full(r'i', advance_pointer=False)
    'stri'
    >>> s.pos
    5
    >>> s.search_full(r'i', return_string=False, advance_pointer=False)
    4
    >>> s.pos
    5
    """
    pattern = get_regex(regex)
    self.match = pattern.search(self.string, self.pos)
    if self.match is None:
        return None
    start = self.pos
    end = self.match.end()
    if advance_pointer:
        self.pos = end
    if return_string:
        return self.string[start:end]
    return end - start
|
Search from the current position.
If `return_string` is false and a match is found, returns the number of
characters matched (from the current position *up to* the end of the
match).
>>> s = Scanner("test string")
>>> s.search_full(r' ')
'test '
>>> s.pos
5
>>> s.search_full(r'i', advance_pointer=False)
'stri'
>>> s.pos
5
>>> s.search_full(r'i', return_string=False, advance_pointer=False)
4
>>> s.pos
5
|
def get_validation_fields(self):
    '''Return the list of (name, requirement_level, type) validation fields.
    Only exp_id must coincide with the folder name, for reproducibility
    (all are served from sample image or Github organization); the rest
    are optional. Runtime variables go in "experiment_variables".
    Requirement levels:
    0: not required, no warning
    1: required, not valid
    2: not required, warning
    type: indicates the variable type
    '''
    required = [("name", str),
                ("time", int),
                ("url", str),
                ("description", str),
                ("instructions", str),
                ("exp_id", str)]
    optional = [("install", list),        # commands to install / build experiment
                ("contributors", list),
                ("reference", list),
                ("cognitive_atlas_task_id", str),
                ("template", str)]
    fields = [(name, 1, kind) for name, kind in required]
    fields += [(name, 0, kind) for name, kind in optional]
    return fields
|
get_validation_fields returns a list of tuples (each a field)
we only require the exp_id to coincide with the folder name, for the sake
of reproducibility (given that all are served from sample image or Github
organization). All other fields are optional.
To specify runtime variables, add to "experiment_variables"
0: not required, no warning
1: required, not valid
2: not required, warning
type: indicates the variable type
|
def to_list(var):
    """Checks if given value is a list, tries to convert, if it is not."""
    if var is None:
        return []
    if isinstance(var, list):
        return var
    if isinstance(var, str):
        # Strings are split on newlines rather than iterated char-by-char.
        return var.split('\n')
    try:
        return list(var)
    except TypeError:
        raise ValueError("{} cannot be converted to the list.".format(var))
|
Checks if given value is a list, tries to convert, if it is not.
|
def dispatch(self, request, *args, **kwargs):
    """
    Redefine parent's method; called on each new request from a user.
    Main difference from Django's approach: the 'request' is not forwarded
    to the handler call — handlers read 'self.request' instead.
    """
    # this part copied from django source code
    method = request.method.lower()
    if method in self.http_method_names:
        handler = getattr(self, method, self.http_method_not_allowed)
    else:
        handler = self.http_method_not_allowed
    # we changed only this line - removed first 'request' argument
    return handler(*args, **kwargs)
|
Redefine parent's method.
Called on each new request from user.
Main difference between Django's approach and ours - we don't push
a 'request' to a method call. We use 'self.request' instead.
|
def _configure(
    self, target, conf=None, logger=None, callconf=None, keepstate=None,
    modules=None
):
    """Configure this class with input conf only if auto_conf or
    configure is true.
    This method should be overriden for specific conf
    :param target: object to configure. self targets by default.
    :param Configuration conf: configuration model to configure. Default is
        this conf.
    :param Logger logger: logger to use.
    :param bool callconf: if True, use conf in target __call__ parameters.
    :param bool keepstate: if True recreate sub objects if they already
        exist.
    :param list modules: modules to reload before.
    :return: configured target.
    """
    result = target
    self.loadmodules(modules=modules)
    # Reset so the recursive applyconfiguration call below does not reload.
    modules = []
    # Fall back to this object's own settings for any unspecified option.
    if conf is None:
        conf = self.conf
    if logger is None:
        logger = self.logger
    if callconf is None:
        callconf = self.callparams
    if keepstate is None:
        keepstate = self.keepstate
    subcats = {}  # store sub configurable categories
    params = []  # self parameters
    sub_conf_prefix = Configurable.SUB_CONF_PREFIX
    # Partition categories: prefixed names describe sub-configurables
    # (grouped by parameter name), the rest contribute direct parameters.
    for cat in conf.values():  # separate sub params and params
        cname = cat.name
        if cname.startswith(sub_conf_prefix):
            subcnames = cname.split(sub_conf_prefix)
            pname = subcnames[1]
            fcname = cname[1 + len(pname):]
            if not fcname:
                # No explicit sub-category name: generate a unique one.
                fcname = str(random())
            fcat = cat.copy(name=fcname)
            if pname in subcats:
                subcats[pname].append(fcat)
            else:
                subcats[pname] = [fcat]
        else:
            cparams = cat.params
            params += cparams.values()
    if callconf and callable(target):
        # Instantiate the target by calling it with conf-derived arguments.
        conf = self._toconf(params)
        args, kwargs = self.getcallparams(conf=conf, target=target)
        result = target = target(*args, **kwargs)
    for param in params:
        value, pname = param.value, param.name
        if pname in subcats:  # if sub param
            # NOTE(review): keepstate reuses the existing attribute value
            # instead of re-calling it — confirm intended interaction with
            # callconf on sub-configurables.
            subcallconf = True
            if keepstate and hasattr(target, pname):
                subcallconf = False
                value = getattr(target, pname)
            cats = subcats[pname]
            subconf = configuration(*cats)
            targets = applyconfiguration(
                targets=[value], conf=subconf, callconf=subcallconf,
                keepstate=keepstate, modules=modules
            )
            value = targets[0]
        if param.error:
            # Parameters that failed to resolve are skipped silently.
            continue
        elif self.foreigns or param.local:
            try:
                setattr(target, pname, value)
            except Exception:
                # Setting a single attribute must not abort configuration
                # of the remaining parameters; log and continue.
                if logger is not None:
                    logger.error(
                        'Error while setting {0}({1}) on {2}: {3}'.format(
                            pname, value, target, format_exc()
                        )
                    )
    return result
|
Configure this class with input conf only if auto_conf or
configure is true.
This method should be overriden for specific conf
:param target: object to configure. self targets by default.
:param Configuration conf: configuration model to configure. Default is
this conf.
:param Logger logger: logger to use.
:param bool callconf: if True, use conf in target __call__ parameters.
:param bool keepstate: if True recreate sub objects if they already
exist.
:param list modules: modules to reload before.
:return: configured target.
|
def process_streamer(self, streamer, callback=None):
    """Queue a streamer so its readings get sent.

    Args:
        streamer (DataStreamer): The streamer itself.
        callback (callable): An optional callable that will be called as:
            callable(index, success, highest_id_received_from_other_side)

    Raises:
        InternalError: If this streamer is already queued/in progress.
    """
    streamer_index = streamer.index

    # Refuse to double-queue a streamer that has not finished yet.
    if streamer_index in self._in_progress_streamers:
        raise InternalError("You cannot add a streamer again until it has finished streaming.")

    queued = QueuedStreamer(streamer, callback)
    self._in_progress_streamers.add(streamer_index)

    self._logger.debug("Streamer %d: queued to send %d readings",
                       streamer_index, queued.initial_count)
    self._queue.put_nowait(queued)
|
Start streaming a streamer.
Args:
streamer (DataStreamer): The streamer itself.
callback (callable): An optional callable that will be called as:
callable(index, success, highest_id_received_from_other_side)
|
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable.
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
|
Makes sure that time and panels are conformable.
|
def get_stop_words(self, language, fail_safe=False):
    """Return a StopWord object for the requested ``language``.

    Collections are loaded at most once and memoized in the
    per-class cache. If the language is unavailable a StopWordError
    is raised, unless ``fail_safe`` is True, in which case an empty
    StopWord object is returned instead.
    """
    # Translate e.g. a short language code into its canonical name,
    # leaving unknown inputs untouched.
    language = self.language_codes.get(language, language)

    words = self.LOADED_LANGUAGES_CACHE.get(language)
    if words is None:
        try:
            words = self._get_stop_words(language)
            self.LOADED_LANGUAGES_CACHE[language] = words
        except StopWordError as error:
            if not fail_safe:
                raise error
            # Fail-safe mode: fall back to an empty (uncached) collection.
            words = []
    return StopWord(language, words)
|
Returns a StopWord object initialized with the stop words collection
requested by ``language``.
If the requested language is not available a StopWordError is raised.
If ``fail_safe`` is set to True, an empty StopWord object is returned.
|
def _to_r(o, as_data=False, level=0):
"""Helper function to convert python data structures to R equivalents
TODO: a single model for transforming to r to handle
* function args
* lists as function args
"""
if o is None:
return "NA"
if isinstance(o, basestring):
return o
if hasattr(o, "r"):
# bridge to @property r on GGStatement(s)
return o.r
elif isinstance(o, bool):
return "TRUE" if o else "FALSE"
elif isinstance(o, (list, tuple)):
inner = ",".join([_to_r(x, True, level+1) for x in o])
return "c({})".format(inner) if as_data else inner
elif isinstance(o, dict):
inner = ",".join(["{}={}".format(k, _to_r(v, True, level+1))
for k, v in sorted(o.iteritems(), key=lambda x: x[0])])
return "list({})".format(inner) if as_data else inner
return str(o)
|
Helper function to convert python data structures to R equivalents
TODO: a single model for transforming to r to handle
* function args
* lists as function args
|
async def register(self, service):
    """Register a new local service (with optional health check).

    Issues a PUT to the agent's ``/v1/agent/service/register``
    endpoint with ``service`` as the request body, e.g.::

        {
            "ID": "redis1",
            "Name": "redis",
            "Tags": ["master", "v1"],
            "Address": "127.0.0.1",
            "Port": 8000,
            "EnableTagOverride": False,
            "Check": {
                "DeregisterCriticalServiceAfter": timedelta(seconds=90),
                "Script": "/usr/local/bin/check_redis.py",
                "HTTP": "http://localhost:5000/health",
                "Interval": timedelta(seconds=10),
                "TTL": timedelta(seconds=15)
            }
        }

    **Name** is mandatory; **ID** defaults to **Name** and must be
    unique per agent. **Tags**, **Address**, **Port**, **Check** and
    **EnableTagOverride** are optional. When **Address** is empty the
    agent's own address is used for DNS queries. A **Check** may
    specify only one of **Script**, **HTTP**, **TCP** or **TTL**
    (**Script**/**HTTP** also need **Interval**) and is named
    "service:<ServiceId>"; **DeregisterCriticalServiceAfter** removes
    the service after it has been critical for that long (minimum one
    minute, reaped roughly every 30 seconds).
    **EnableTagOverride** (default ``False``) lets external agents
    update this service's tags in the catalog; it applies only to the
    locally registered service and is independent per node.

    Returns:
        bool: ``True`` on success
    """
    resp = await self._api.put("/v1/agent/service/register",
                               data=service)
    return resp.status == 200
|
Registers a new local service.
Returns:
bool: ``True`` on success
The register endpoint is used to add a new service,
with an optional health check, to the local agent.
The request body must look like::
{
"ID": "redis1",
"Name": "redis",
"Tags": [
"master",
"v1"
],
"Address": "127.0.0.1",
"Port": 8000,
"EnableTagOverride": False,
"Check": {
"DeregisterCriticalServiceAfter": timedelta(seconds=90),
"Script": "/usr/local/bin/check_redis.py",
"HTTP": "http://localhost:5000/health",
"Interval": timedelta(seconds=10),
"TTL": timedelta(seconds=15)
}
}
The **Name** field is mandatory. If an **ID** is not provided,
it is set to **Name**. You cannot have duplicate **ID** entries
per agent, so it may be necessary to provide an **ID** in the case
of a collision.
**Tags**, **Address**, **Port**, **Check** and **EnableTagOverride**
are optional.
If **Address** is not provided or left empty, then the agent's
address will be used as the address for the service during DNS
queries. When querying for services using HTTP endpoints such
as service health or service catalog and encountering an empty
**Address** field for a service, use the **Address** field of
the agent node associated with that instance of the service,
which is returned alongside the service information.
If **Check** is provided, only one of **Script**, **HTTP**, **TCP**
or **TTL** should be specified. **Script** and **HTTP** also require
**Interval**. The created check will be named "service:<ServiceId>".
Checks that are associated with a service may also contain an
optional **DeregisterCriticalServiceAfter** field, which is a timeout
in the same format as **Interval** and **TTL**. If a check is in the
critical state for more than this configured value, then its
associated service (and all of its associated checks) will
automatically be deregistered. The minimum timeout is 1 minute, and
the process that reaps critical services runs every 30 seconds, so it
may take slightly longer than the configured timeout to trigger the
deregistration. This should generally be configured with a timeout
that's much, much longer than any expected recoverable outage for the
given service.
**EnableTagOverride** can optionally be specified to disable the
anti-entropy feature for this service's tags. If **EnableTagOverride**
is set to ``True`` then external agents can update this service in the
catalog and modify the tags. Subsequent local sync operations by this
agent will ignore the updated tags. For instance, if an external agent
modified both the tags and the port for this service and
**EnableTagOverride** was set to true then after the next sync cycle
the service's port would revert to the original value but the tags
would maintain the updated value. As a counter example, if an external
agent modified both the tags and port for this service and
**EnableTagOverride** was set to false then after the next sync cycle
the service's port and the tags would revert to the original value and
all modifications would be lost. It's important to note that this
applies only to the locally registered service. If you have multiple
nodes all registering the same service their **EnableTagOverride**
configuration and all other service configuration items are
independent of one another. Updating the tags for the service
registered on one node is independent of the same service (by name)
registered on another node. If **EnableTagOverride** is not specified
the default value is ``False``.
|
def display(self):
    """
    Displays an overview containing descriptive stats for the Series
    provided.

    Prints (to stdout, via ``tabulate``) the date range, a headline
    summary (total return / Sharpe / CAGR / max drawdown), annualized
    returns over standard lookback windows, daily/monthly/yearly
    periodic stats, drawdown stats, and win-rate figures. All values
    are read from precomputed attributes on ``self``.
    """
    print('Stats for %s from %s - %s' % (self.name, self.start, self.end))
    # Only show the risk-free rate when it is a scalar float (it may
    # presumably also be a series -- TODO confirm -- in which case the
    # line is skipped).
    if type(self.rf) is float:
        print('Annual risk-free rate considered: %s' % (fmtp(self.rf)))
    # Headline summary table.
    print('Summary:')
    data = [[fmtp(self.total_return), fmtn(self.daily_sharpe),
             fmtp(self.cagr), fmtp(self.max_drawdown)]]
    print(tabulate(data, headers=['Total Return', 'Sharpe',
                                  'CAGR', 'Max Drawdown']))
    # Annualized returns over the standard lookback windows.
    print('\nAnnualized Returns:')
    data = [[fmtp(self.mtd), fmtp(self.three_month), fmtp(self.six_month),
             fmtp(self.ytd), fmtp(self.one_year), fmtp(self.three_year),
             fmtp(self.five_year), fmtp(self.ten_year),
             fmtp(self.incep)]]
    print(tabulate(data,
                   headers=['mtd', '3m', '6m', 'ytd', '1y',
                            '3y', '5y', '10y', 'incep.']))
    # Per-frequency (daily/monthly/yearly) descriptive statistics.
    print('\nPeriodic:')
    data = [
        ['sharpe', fmtn(self.daily_sharpe), fmtn(self.monthly_sharpe),
         fmtn(self.yearly_sharpe)],
        ['mean', fmtp(self.daily_mean), fmtp(self.monthly_mean),
         fmtp(self.yearly_mean)],
        ['vol', fmtp(self.daily_vol), fmtp(self.monthly_vol),
         fmtp(self.yearly_vol)],
        ['skew', fmtn(self.daily_skew), fmtn(self.monthly_skew),
         fmtn(self.yearly_skew)],
        ['kurt', fmtn(self.daily_kurt), fmtn(self.monthly_kurt),
         fmtn(self.yearly_kurt)],
        ['best', fmtp(self.best_day), fmtp(self.best_month),
         fmtp(self.best_year)],
        ['worst', fmtp(self.worst_day), fmtp(self.worst_month),
         fmtp(self.worst_year)]]
    print(tabulate(data, headers=['daily', 'monthly', 'yearly']))
    # Drawdown depth and duration.
    print('\nDrawdowns:')
    data = [
        [fmtp(self.max_drawdown), fmtp(self.avg_drawdown),
         fmtn(self.avg_drawdown_days)]]
    print(tabulate(data, headers=['max', 'avg', '# days']))
    # Win-rate style miscellany.
    print('\nMisc:')
    data = [['avg. up month', fmtp(self.avg_up_month)],
            ['avg. down month', fmtp(self.avg_down_month)],
            ['up year %', fmtp(self.win_year_perc)],
            ['12m up %', fmtp(self.twelve_month_win_perc)]]
    print(tabulate(data))
|
Displays an overview containing descriptive stats for the Series
provided.
|
def get_serialize_format(self, mimetype):
    """Get the serialization format for the given mimetype.

    Instance-level registrations take precedence over the module-level
    ``formats`` registry; ``None`` is returned when neither knows the
    mimetype.
    """
    serialize_format = self.formats.get(mimetype, None)
    if serialize_format is None:
        # Fall back to the module-level registry.
        serialize_format = formats.get(mimetype, None)
    return serialize_format
|
Get the serialization format for the given mimetype
|
def users(self, params):
    """Add, update, or remove users on this tailored audience.

    This is a private API and requires whitelisting from Twitter.
    Multiple user identifier types per user are accepted.

    Returns a ``(success_count, total_count)`` tuple from the response.
    """
    resource = self.RESOURCE_USERS.format(account_id=self.account.id, id=self.id)
    headers = {'Content-Type': 'application/json'}
    request = Request(self.account.client,
                      'post',
                      resource,
                      headers=headers,
                      body=json.dumps(params))
    data = request.perform().body['data']
    return (data['success_count'], data['total_count'])
|
This is a private API and requires whitelisting from Twitter.
This endpoint will allow partners to add, update and remove users from a given
tailored_audience_id.
The endpoint will also accept multiple user identifier types per user as well.
|
def get_method_by_idx(self, idx):
    """
    Return a specific method by using an index

    :param idx: the index of the method
    :type idx: int

    :rtype: None or an :class:`EncodedMethod` object
    """
    # Lazily build the idx -> method lookup table on first use
    # (use ``is None`` rather than ``== None`` for the sentinel check).
    if self.__cached_methods_idx is None:
        self.__cached_methods_idx = {}
        for i in self.classes.class_def:
            for j in i.get_methods():
                self.__cached_methods_idx[j.get_method_idx()] = j

    # dict.get returns None for an unknown index, matching the previous
    # try/except KeyError behaviour with a single lookup.
    return self.__cached_methods_idx.get(idx)
|
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
|
def save_process():
    '''process for saving a graph'''
    from MAVProxy.modules.lib import wx_processguard
    from MAVProxy.modules.lib.wx_loader import wx
    from MAVProxy.modules.lib.wxgrapheditor import GraphDialog

    # A wx.App instance must exist before any window can be created.
    application = wx.App(False)
    dialog = GraphDialog('Graph Editor',
                        mestate.last_graph,
                        save_callback)
    dialog.ShowModal()
    dialog.Destroy()
|
process for saving a graph
|
def p_always(self, p):
    # NOTE: the string below is the PLY/yacc grammar production for this
    # rule -- yacc parses function docstrings, so it must not be reworded.
    'always : ALWAYS senslist always_statement'
    # p[2] = sensitivity list, p[3] = statement body.
    p[0] = Always(p[2], p[3], lineno=p.lineno(1))
    # Propagate the source line of the ALWAYS token to the production.
    p.set_lineno(0, p.lineno(1))
|
always : ALWAYS senslist always_statement
|
def simple_profile(self, sex=None):
    """Generate a basic profile of personal information.

    ``sex`` may be forced to ``"F"`` or ``"M"``; any other value
    (including ``None``) picks one at random.
    """
    valid_sexes = ["F", "M"]
    if sex not in valid_sexes:
        sex = self.random_element(valid_sexes)
    # Pick a name generator matching the (now guaranteed valid) sex.
    if sex == 'F':
        name = self.generator.name_female()
    else:
        name = self.generator.name_male()
    return {
        "username": self.generator.user_name(),
        "name": name,
        "sex": sex,
        "address": self.generator.address(),
        "mail": self.generator.free_email(),
        "birthdate": self.generator.date_of_birth(),
    }
|
Generates a basic profile with personal information
|
def get_taskfileinfo_selection(self):
    """Return a taskfileinfo that the user chose from the available options

    :returns: the chosen taskfileinfo
    :rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
    :raises: None
    """
    # Block on a modal dialog until the user picks one of the options.
    selector = OptionSelector(self.reftrack)
    selector.exec_()
    return selector.selected
|
Return a taskfileinfo that the user chose from the available options
:returns: the chosen taskfileinfo
:rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
:raises: None
|
def _get_all_data(self, start_date, end_date):
    """Get the needed data from all of the vars in the calculation."""
    variables = _replace_pressure(self.variables, self.dtype_in_vert)
    data = []
    for variable in variables:
        data.append(self._get_input_data(variable, start_date, end_date))
    return data
|
Get the needed data from all of the vars in the calculation.
|
def view_count(self, id, **kwargs):
    """Return the ticket count for a single view.

    https://developer.zendesk.com/rest_api/docs/core/views#get-view-count
    """
    endpoint = "/api/v2/views/{id}/count.json".format(id=id)
    return self.call(endpoint, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/views#get-view-count
|
def set_buffer(library, session, mask, size):
    """Sets the size for the formatted I/O and/or low-level I/O communication buffer(s).

    Corresponds to viSetBuf function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param mask: Specifies the type of buffer. (Constants.READ_BUF, .WRITE_BUF, .IO_IN_BUF, .IO_OUT_BUF)
    :param size: The size to be set for the specified buffer(s).
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # Thin pass-through to the underlying C entry point.
    status = library.viSetBuf(session, mask, size)
    return status
|
Sets the size for the formatted I/O and/or low-level I/O communication buffer(s).
Corresponds to viSetBuf function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param mask: Specifies the type of buffer. (Constants.READ_BUF, .WRITE_BUF, .IO_IN_BUF, .IO_OUT_BUF)
:param size: The size to be set for the specified buffer(s).
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
|
def get(self, mac):
    """Get data from API as instance of ResponseModel.

    Keyword arguments:
    mac -- MAC address or OUI for searching

    Raises EmptyResponseException when the API returns no payload.
    """
    query = {
        self._FORMAT_F: 'json',
        self._SEARCH_F: mac
    }
    raw = self.__call_api(self.__url, query)
    decoded = self.__decode_str(raw, 'utf-8')
    if not decoded:
        raise EmptyResponseException()
    return self.__parse(decoded)
|
Get data from API as instance of ResponseModel.
Keyword arguments:
mac -- MAC address or OUI for searching
|
def path(self, goal):
    """Get the shortest way between two nodes of the graph

    Args:
        goal (str): Name of the targeted node
    Return:
        list of Node
    Raises:
        ValueError: when ``goal`` is not in this node's routing table.
    """
    if goal == self.name:
        return [self]
    if goal not in self.routes:
        raise ValueError("Unknown '{0}'".format(goal))

    # Follow the routing table hop by hop until we land on the goal.
    hops = [self]
    node = self
    while node.name != goal:
        node = node.routes[goal].direction
        hops.append(node)
    return hops
|
Get the shortest way between two nodes of the graph
Args:
goal (str): Name of the targeted node
Return:
list of Node
|
def read_files(path):
    """
    For a directory full of files, retrieve it
    as a dict with file_name:text
    """
    contents = {}
    for entry in os.listdir(path):
        with open(os.path.join(path, entry), 'r') as handle:
            contents[entry] = replace_whitespace(handle.read(), insert=True)
    return contents
|
For a directory full of files, retrieve it
as a dict with file_name:text
|
def delete_edge(self, ind_node, dep_node):
    """ Delete an edge from the graph.

    Args:
        ind_node (str): The independent node to delete an edge from.
        dep_node (str): The dependent node that has a dependency on the
            ind_node.

    Raises:
        KeyError: Raised when the edge doesn't already exist.
    """
    # Reject the request unless the edge is actually present.
    if dep_node not in self.graph.get(ind_node, []):
        raise KeyError(
            "No edge exists between %s and %s." % (ind_node, dep_node)
        )
    self.graph[ind_node].remove(dep_node)
|
Delete an edge from the graph.
Args:
ind_node (str): The independent node to delete an edge from.
dep_node (str): The dependent node that has a dependency on the
ind_node.
Raises:
KeyError: Raised when the edge doesn't already exist.
|
def dump_all_keys_or_addrs(wallet_obj):
    '''
    Offline-enabled mechanism to dump addresses

    Interactively warns the user, asks how many entries to derive, then
    prints the first N BIP32 paths (``m/<chain>/<index>``) on both
    chain 0 and chain 1 with their addresses -- and WIF private keys
    when the wallet was opened with its master private key.
    '''
    print_traversal_warning()
    puts('\nDo you understand this warning?')
    # Abort unless the user explicitly confirms (default answer is No).
    if not confirm(user_prompt=DEFAULT_PROMPT, default=False):
        puts(colored.red('Dump Cancelled!'))
        return
    mpub = wallet_obj.serialize_b58(private=False)
    if wallet_obj.private_key:
        desc_str = 'private keys'
    else:
        # Public-only wallet: explain how to reopen with the private key.
        desc_str = 'addresses'
        puts('Displaying Public Addresses Only')
        puts('For Private Keys, please open bcwallet with your Master Private Key:\n')
        priv_to_display = '%s123...' % first4mprv_from_mpub(mpub=mpub)
        print_bcwallet_basic_priv_opening(priv_to_display=priv_to_display)
    puts('How many %s (on each chain) do you want to dump?' % desc_str)
    puts('Enter "b" to go back.\n')
    num_keys = get_int(
        user_prompt=DEFAULT_PROMPT,
        max_int=10**5,
        default_input='5',
        show_default=True,
        quit_ok=True,
    )
    # get_int returns False when the user backs out ("b").
    if num_keys is False:
        return
    if wallet_obj.private_key:
        print_childprivkey_warning()
    puts('-' * 70)
    # Chain 0 is printed as the external chain, chain 1 as the internal
    # chain (see the header helpers below).
    for chain_int in (0, 1):
        for current in range(0, num_keys):
            path = "m/%d/%d" % (chain_int, current)
            # Print a chain header before the first entry of each chain.
            if current == 0:
                if chain_int == 0:
                    print_external_chain()
                    print_key_path_header()
                elif chain_int == 1:
                    print_internal_chain()
                    print_key_path_header()
            child_wallet = wallet_obj.get_child_for_path(path)
            if wallet_obj.private_key:
                wif_to_use = child_wallet.export_to_wif()
            else:
                wif_to_use = None
            print_path_info(
                address=child_wallet.to_address(),
                path=path,
                wif=wif_to_use,
                coin_symbol=coin_symbol_from_mkey(mpub),
            )
    puts(colored.blue('\nYou can compare this output to bip32.org'))
|
Offline-enabled mechanism to dump addresses
|
def _replace_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Replace an existing key

    Rewrites the user's authorized_keys file in place: every line whose
    key payload matches ``key`` is replaced by a freshly formatted auth
    line; all other lines (including comments) are preserved.

    Raises CommandExecutionError on any read/write failure.
    '''
    auth_line = _format_auth_line(key, enc, comment, options or [])
    lines = []
    full = _get_config_file(user, config)
    try:
        # open the file for both reading AND writing
        with salt.utils.files.fopen(full, 'r') as _fh:
            for line in _fh:
                # We don't need any whitespace-only containing lines or arbitrary doubled newlines
                line = salt.utils.stringutils.to_unicode(line.strip())
                if line == '':
                    continue
                line += '\n'
                if line.startswith('#'):
                    # Commented Line
                    lines.append(line)
                    continue
                # Groups: optional leading options, key type (ssh-*/ecdsa-*),
                # base64 key payload, optional trailing comment.
                comps = re.findall(r'((.*)\s)?(ssh-[a-z0-9-]+|ecdsa-[a-z0-9-]+)\s([a-zA-Z0-9+/]+={0,2})(\s(.*))?', line)
                if comps and len(comps[0]) > 3 and comps[0][3] == key:
                    # Found our key, replace it
                    lines.append(auth_line)
                else:
                    lines.append(line)
            # NOTE(review): redundant -- the with block already closes the
            # file; kept as-is to avoid any behaviour change.
            _fh.close()
        # Re-open the file writable after properly closing it
        with salt.utils.files.fopen(full, 'wb') as _fh:
            # Write out any changes
            _fh.writelines(salt.utils.data.encode(lines))
    except (IOError, OSError) as exc:
        raise CommandExecutionError(
            'Problem reading or writing to key file: {0}'.format(exc)
        )
|
Replace an existing key
|
def render(opts, functions, states=None, proxy=None, context=None):
    '''
    Returns the render modules
    '''
    if context is None:
        context = {}

    # Globals injected into every renderer module.
    pack = {
        '__salt__': functions,
        '__grains__': opts.get('grains', {}),
        '__context__': context,
    }
    if states:
        pack['__states__'] = states
    pack['__proxy__'] = proxy or {}

    loader = LazyLoader(
        _module_dirs(
            opts,
            'renderers',
            'render',
            ext_type_dirs='render_dirs',
        ),
        opts,
        tag='render',
        pack=pack,
    )
    rend = FilterDictWrapper(loader, '.render')

    # Fail hard if the configured renderer pipeline cannot be satisfied.
    if not check_render_pipe_str(opts['renderer'], rend,
                                 opts['renderer_blacklist'],
                                 opts['renderer_whitelist']):
        err = ('The renderer {0} is unavailable, this error is often because '
               'the needed software is unavailable'.format(opts['renderer']))
        log.critical(err)
        raise LoaderError(err)
    return rend
|
Returns the render modules
|
def build(self):
    """Generic entrypoint of `SymbolTableBuilder` class.

    Seeds the table with builtins, registers the declared functions,
    then walks the AST to resolve everything else.
    """
    self.load_builtins()
    self.load_functions(self.tree)
    self.visit(self.tree)
|
Generic entrypoint of `SymbolTableBuilder` class.
|
def wait(self, timeout=None):
    """
    Waits for the client to stop its loop

    Blocks until the stop event fires or ``timeout`` (seconds)
    elapses, then reports whether the client actually stopped.
    """
    stop_event = self.__stopped
    stop_event.wait(timeout)
    return stop_event.is_set()
|
Waits for the client to stop its loop
|
def check(self, val):
    """Make sure given value is consistent with this `Key` specification.

    NOTE: if `type` is 'None', then `listable` also is *not* checked.
    """
    # No type requirement: anything goes (lists included).
    if self.type is None:
        return True

    val_is_list = isinstance(val, list)
    # Lists are only acceptable for listable keys.
    if val_is_list and not self.listable:
        return False

    if self.type == KEY_TYPES.NUMERIC:
        # `is_number` already checks for either list or single value.
        return is_number(val)
    if self.type == KEY_TYPES.TIME:
        # Accept numbers, or strings containing a date separator.
        return is_number(val) or '-' in val or '/' in val
    if self.type == KEY_TYPES.STRING:
        # For lists, only the first element is inspected.
        target = val[0] if val_is_list else val
        return isinstance(target, basestring)
    if self.type == KEY_TYPES.BOOL:
        target = val[0] if val_is_list else val
        return isinstance(target, bool)
    # Unknown type values are not rejected.
    return True
|
Make sure given value is consistent with this `Key` specification.
NOTE: if `type` is 'None', then `listable` also is *not* checked.
|
def onStart(self, *args, **kwarg):
    """
    Verify user input and kick off the client's program if valid
    """
    with transactUI(self):
        config = self.navbar.getActiveConfig()
        config.resetErrors()
        # Invalid input: surface the errors and bail out.
        if not config.isValid():
            config.displayErrors()
            self.Layout()
            return
        self.clientRunner.run(self.buildCliString())
        self.showConsole()
|
Verify user input and kick off the client's program if valid
|
def get_datarect(self):
    """Get the approximate bounding box of the displayed image.

    Returns
    -------
    rect : tuple
        Bounding box in data coordinates in the form of
        ``(x1, y1, x2, y2)``.
    """
    return (self._org_x1, self._org_y1, self._org_x2, self._org_y2)
|
Get the approximate bounding box of the displayed image.
Returns
-------
rect : tuple
Bounding box in data coordinates in the form of
``(x1, y1, x2, y2)``.
|
def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(u'US/Eastern') is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> timezone('Asia/Shangri-La')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: 'Asia/Shangri-La'

    >>> timezone(u'\N{TRADE MARK SIGN}')
    Traceback (most recent call last):
    ...
    UnknownTimeZoneError: u'\u2122'
    '''
    # 'UTC' is special-cased: return the module-level singleton.
    if zone.upper() == 'UTC':
        return utc
    try:
        zone = zone.encode('US-ASCII')
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    # NOTE(review): on Python 3, ``encode`` yields ``bytes`` here, which
    # then flows into the str-based lookups below -- this code looks
    # Python 2 specific; confirm the intended interpreter version.
    zone = _unmunge_zone(zone)
    if zone not in _tzinfo_cache:
        if resource_exists(zone):
            # Build once and memoize; zone data files are immutable.
            _tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone]
|
r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(u'US/Eastern') is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> timezone('Asia/Shangri-La')
Traceback (most recent call last):
...
UnknownTimeZoneError: 'Asia/Shangri-La'
>>> timezone(u'\N{TRADE MARK SIGN}')
Traceback (most recent call last):
...
UnknownTimeZoneError: u'\u2122'
|
def symmetric_difference_update(self, other):
    """
    Throws out all intervals except those only in self or other,
    not both.
    """
    remaining = set(other)
    # Drop intervals present on both sides; whatever survives in
    # ``remaining`` exists only in ``other`` and is merged back in.
    for interval in list(self):
        if interval in remaining:
            self.remove(interval)
            remaining.remove(interval)
    self.update(remaining)
|
Throws out all intervals except those only in self or other,
not both.
|
def _update_model(self, completions):
    """
    Creates a QStandardModel that holds the suggestion from the completion
    models for the QCompleter

    :param completionPrefix:
    """
    # Build a fresh model from scratch for this completion run.
    model = QtGui.QStandardItemModel()
    self._tooltips.clear()
    for completion in completions:
        name = completion['name']
        item = QtGui.QStandardItem()
        item.setData(name, QtCore.Qt.DisplayRole)
        # Tooltips are stored separately, keyed by completion name.
        tooltip = completion.get('tooltip')
        if tooltip:
            self._tooltips[name] = tooltip
        if 'icon' in completion:
            icon = completion['icon']
            if isinstance(icon, list):
                # [theme-name, fallback-path] pair.
                icon = QtGui.QIcon.fromTheme(icon[0], QtGui.QIcon(icon[1]))
            else:
                icon = QtGui.QIcon(icon)
            item.setData(QtGui.QIcon(icon),
                         QtCore.Qt.DecorationRole)
        model.appendRow(item)
    try:
        self._completer.setModel(model)
    except RuntimeError:
        # The completer's C++ object was deleted; recreate and retry.
        self._create_completer()
        self._completer.setModel(model)
    return model
|
Creates a QStandardModel that holds the suggestion from the completion
models for the QCompleter
:param completionPrefix:
|
def delete_subscription(self):
    """Delete subscription for this thread.

    :returns: bool
    """
    subscription_url = self._build_url('subscription', base_url=self._api)
    return self._boolean(self._delete(subscription_url), 204, 404)
|
Delete subscription for this thread.
:returns: bool
|
def _parse_metadata_and_message_count(response):
    '''
    Extracts approximate messages count header.
    '''
    metadata = _parse_metadata(response)
    header_value = response.headers.get('x-ms-approximate-messages-count')
    metadata.approximate_message_count = _to_int(header_value)
    return metadata
|
Extracts approximate messages count header.
|
def get_chunk_ranges(self, symbol, chunk_range=None, reverse=False):
    """Yield a (Start, End) tuple for each chunk stored for ``symbol``.

    Parameters
    ----------
    symbol: str
        the symbol for the given item in the DB
    chunk_range: None, or a range object
        allows you to subset the chunks by range
    reverse: boolean
        return the chunk ranges in reverse order

    Returns
    -------
    generator
    """
    sym = self._get_symbol_info(symbol)
    if not sym:
        raise NoDataFoundException("Symbol does not exist.")
    chunker = CHUNKER_MAP[sym[CHUNKER]]

    # all symbols have a segment 0, so matching on it yields exactly
    # one document per chunk
    query = {SYMBOL: symbol, SEGMENT: 0}
    if chunk_range is not None:
        query.update(chunker.to_mongo(chunk_range))

    sort_order = pymongo.DESCENDING if reverse else pymongo.ASCENDING
    for doc in self._collection.find(query,
                                     projection=[START, END],
                                     sort=[(START, sort_order)]):
        yield (chunker.chunk_to_str(doc[START]), chunker.chunk_to_str(doc[END]))
|
Returns a generator of (Start, End) tuples for each chunk in the symbol
Parameters
----------
symbol: str
the symbol for the given item in the DB
chunk_range: None, or a range object
allows you to subset the chunks by range
reverse: boolean
return the chunk ranges in reverse order
Returns
-------
generator
|
def TimeField(formatter=types.DEFAULT_TIME_FORMAT, default=NOTHING,
              required=True, repr=True, cmp=True, key=None):
    """
    Create new time field on a model.

    :param formatter: time formatter string (default: "%H:%M:%S")
    :param default: any time or string that can be converted to a time value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    return attrib(
        default=_init_fields.init_default(required, default, None),
        converter=converters.to_time_field(formatter),
        validator=_init_fields.init_validator(required, time),
        repr=repr,
        cmp=cmp,
        metadata=dict(formatter=formatter, key=key),
    )
|
Create new time field on a model.
:param formatter: time formatter string (default: "%H:%M:%S")
:param default: any time or string that can be converted to a time value
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
:param string key: override name of the value when converted to dict.
|
def newCatalog(sgml):
    """create a new Catalog. """
    handle = libxml2mod.xmlNewCatalog(sgml)
    # The C call returns None on failure.
    if handle is None:
        raise treeError('xmlNewCatalog() failed')
    return catalog(_obj=handle)
|
create a new Catalog.
|
def is_valid(self):
    """Return whether the current values of the form fields are all valid.

    Validates sub-forms, form-sets and fields (in that order), then runs a
    second pass over the fields for cross-field validation.  As a side
    effect, (re)populates ``self.cleaned_data``, ``self.changed_fields``,
    ``self.validated``, ``self._errors`` and ``self._named_errors``.
    """
    # Reset all result state before validating.
    self.cleaned_data = {}
    self.changed_fields = []
    self.validated = False
    self._errors = {}
    self._named_errors = {}
    # Accumulate into locals; only committed to self at the end.
    cleaned_data = {}
    changed_fields = []
    errors = {}
    named_errors = {}
    # Validate sub forms
    for name, subform in self._forms.items():
        if not subform.is_valid():
            errors[name] = subform._errors
            named_errors.update(subform._named_errors)
            continue
        if subform.has_changed:
            changed_fields.append(name)
    # Validate sub sets
    for name, formset in self._sets.items():
        if not formset.is_valid():
            errors[name] = formset._errors
            named_errors.update(formset._named_errors)
            continue
        if formset.has_changed:
            changed_fields.append(name)
    # Validate each field
    for name, field in self._fields.items():
        field.error = None
        py_value = field.validate(self)
        if field.error:
            errors[name] = field.error
            named_errors[field.name] = field.error
            continue
        cleaned_data[name] = py_value
        if hasattr(field, '_deleted'):
            # Deleted fields clear their cleaned value but still count as
            # changed.
            cleaned_data[name] = None
            field.has_changed = True
        if field.has_changed:
            changed_fields.append(name)
    # Validate relation between fields (second pass, with all cleaned
    # values available to each field's validator).
    for name, field in self._fields.items():
        field.validate(self, cleaned_data)
        if field.error:
            errors[name] = field.error
            named_errors[field.name] = field.error
            continue
    if errors:
        # On failure, leave cleaned_data/changed_fields in their reset state.
        self._errors = errors
        self._named_errors = named_errors
        return False
    self.changed_fields = changed_fields
    self.cleaned_data = self.clean(cleaned_data)
    self.validated = True
    return True
|
Return whether the current values of the form fields are all valid.
|
def readValuesPyBigWig(self, reference, start, end):
    """
    Use pyBigWig package to read a BigWig file for the
    given range and yield protocol Continuous objects.

    pyBigWig returns an array of values that fill the query range.
    This method trims NaN values from the start and end, and splits
    the output at NaN gaps and at self._MAX_VALUES-sized chunks.
    pyBigWig throws an exception if end is outside of the
    reference range, so the query range is checked here and our own
    exceptions are raised instead.

    :param reference: reference (chromosome) name to query
    :param start: 0-based start of the query range (clamped to 0)
    :param end: end of the query range (clamped to the reference length)
    :raises ReferenceNameNotFoundException: unknown reference name
    :raises ReferenceRangeErrorException: empty/invalid query range
    """
    if not self.checkReference(reference):
        raise exceptions.ReferenceNameNotFoundException(reference)
    if start < 0:
        start = 0
    bw = pyBigWig.open(self._sourceFile)
    # try/finally guarantees the handle is closed even when one of the
    # validation checks below raises or the generator is abandoned early
    # (the original leaked the handle on those paths).
    try:
        referenceLen = bw.chroms(reference)
        if referenceLen is None:
            raise exceptions.ReferenceNameNotFoundException(reference)
        if end > referenceLen:
            end = referenceLen
        if start >= end:
            raise exceptions.ReferenceRangeErrorException(
                reference, start, end)
        data = protocol.Continuous()
        # Read the range in _INCREMENT-sized windows to bound memory use.
        curStart = start
        curEnd = curStart + self._INCREMENT
        while curStart < end:
            if curEnd > end:
                curEnd = end
            for i, val in enumerate(bw.values(reference, curStart, curEnd)):
                if not math.isnan(val):
                    if len(data.values) == 0:
                        # First value of a run fixes the block's start.
                        data.start = curStart + i
                    data.values.append(val)
                    if len(data.values) == self._MAX_VALUES:
                        yield data
                        data = protocol.Continuous()
                elif len(data.values) > 0:
                    # NaN terminates the current run of values.
                    yield data
                    data = protocol.Continuous()
            curStart = curEnd
            curEnd = curStart + self._INCREMENT
        if len(data.values) > 0:
            yield data
    finally:
        bw.close()
|
Use pyBigWig package to read a BigWig file for the
given range and return a protocol object.
pyBigWig returns an array of values that fill the query range.
Not sure if it is possible to get the step and span.
This method trims NaN values from the start and end.
pyBigWig throws an exception if end is outside of the
reference range. This function checks the query range
and throws its own exceptions to avoid the ones thrown
by pyBigWig.
|
def closeEvent(self, event):
    """Handle closing of the window, prompting the user if configs were edited.

    :param event: the close event
    :type event: QCloseEvent
    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.inimodel.get_edited():
        # Nothing was modified; close without asking.
        event.accept()
        return
    answer = self.doc_modified_prompt()
    if answer == QtGui.QMessageBox.Yes:
        event.accept()
    else:
        event.ignore()
|
Handles closing of the window. If configs were edited, ask user to continue.
:param event: the close event
:type event: QCloseEvent
:returns: None
:rtype: None
:raises: None
|
def on_trial_result(self, trial_runner, trial, result):
    """If bracket is finished, all trials will be stopped.

    If a given trial finishes and bracket iteration is not done,
    the trial will be paused and resources will be given up.
    This scheduler will not start trials but will stop trials.
    The current running trial will not be handled,
    as the trialrunner will be given control to handle it."""
    bracket, _ = self._trial_info[trial]
    bracket.update_trial_stats(trial, result)
    if not bracket.continue_trial(trial):
        # Bracket iteration done for this trial: let the bracket decide
        # whether to pause/stop it.
        return self._process_bracket(trial_runner, bracket, trial)
    return TrialScheduler.CONTINUE
|
If bracket is finished, all trials will be stopped.
If a given trial finishes and bracket iteration is not done,
the trial will be paused and resources will be given up.
This scheduler will not start trials but will stop trials.
The current running trial will not be handled,
as the trialrunner will be given control to handle it.
|
def _upload_file_aws_cli(local_fname, bucket, keyname, config=None, mditems=None):
    """Streaming upload via the standard AWS command line interface.
    """
    s3_fname = "s3://%s/%s" % (bucket, keyname)
    cli_args = ["--sse", "--expected-size", str(os.path.getsize(local_fname))]
    if config:
        region = config.get("region")
        if region:
            cli_args.extend(["--region", region])
        if config.get("reduced_redundancy"):
            cli_args.extend(["--storage-class", "REDUCED_REDUNDANCY"])
    # Use the `aws` binary that sits next to the current interpreter.
    aws_bin = os.path.join(os.path.dirname(sys.executable), "aws")
    cmd = [aws_bin, "s3", "cp"] + cli_args + [local_fname, s3_fname]
    do.run(cmd, "Upload to s3: %s %s" % (bucket, keyname))
|
Streaming upload via the standard AWS command line interface.
|
def get_mopheader(expnum, ccd, version='p', prefix=None):
    """
    Retrieve the mopheader, either from cache or from vospace

    @param expnum: exposure number (passed through to dbimages_uri)
    @param ccd: CCD identifier (passed through to dbimages_uri)
    @param version: processing version tag (default 'p')
    @param prefix: optional filename prefix; None is treated as ''
    @return: Header
    """
    # Old-style conditional expression: substitute '' when prefix is None.
    prefix = prefix is None and "" or prefix
    mopheader_uri = dbimages_uri(expnum=expnum,
                                 ccd=ccd,
                                 version=version,
                                 prefix=prefix,
                                 ext='.mopheader')
    # Module-level cache keyed by URI; hit short-circuits all I/O below.
    if mopheader_uri in mopheaders:
        return mopheaders[mopheader_uri]
    filename = os.path.basename(mopheader_uri)
    if os.access(filename, os.F_OK):
        # Prefer a local copy over fetching from vospace.
        logger.debug("File already on disk: {}".format(filename))
        mopheader_fpt = StringIO(open(filename, 'r').read())
    else:
        mopheader_fpt = StringIO(open_vos_or_local(mopheader_uri).read())
    with warnings.catch_warnings():
        # The mopheader files trigger benign astropy warnings; suppress them.
        warnings.simplefilter('ignore', AstropyUserWarning)
        mopheader = fits.open(mopheader_fpt)
    # add some values to the mopheader so it can be an astrom header too.
    header = mopheader[0].header
    try:
        header['FWHM'] = get_fwhm(expnum, ccd)
    except IOError:
        # No FWHM record available; fall back to a default of 10.
        header['FWHM'] = 10
    header['SCALE'] = mopheader[0].header['PIXSCALE']
    header['NAX1'] = header['NAXIS1']
    header['NAX2'] = header['NAXIS2']
    header['MOPversion'] = header['MOP_VER']
    # Convert the mid-exposure MJD into MPC-formatted time string.
    header['MJD_OBS_CENTER'] = str(Time(header['MJD-OBSC'],
                                        format='mjd',
                                        scale='utc', precision=5).replicate(format='mpc'))
    header['MAXCOUNT'] = MAXCOUNT
    mopheaders[mopheader_uri] = header
    mopheader.close()
    return mopheaders[mopheader_uri]
|
Retrieve the mopheader, either from cache or from vospace
@param expnum:
@param ccd:
@param version:
@param prefix:
@return: Header
|
def define_charset(self, code, mode):
    """Define ``G0`` or ``G1`` charset.

    :param str code: character set code, should be a character
        from ``"B0UK"``, otherwise ignored.
    :param str mode: if ``"("`` ``G0`` charset is defined, if
        ``")"`` -- we operate on ``G1``.

    .. warning:: User-defined charsets are currently not supported.
    """
    # Unknown codes are silently ignored.
    if code not in cs.MAPS:
        return
    if mode == "(":
        self.g0_charset = cs.MAPS[code]
    elif mode == ")":
        self.g1_charset = cs.MAPS[code]
|
Define ``G0`` or ``G1`` charset.
:param str code: character set code, should be a character
from ``"B0UK"``, otherwise ignored.
:param str mode: if ``"("`` ``G0`` charset is defined, if
``")"`` -- we operate on ``G1``.
.. warning:: User-defined charsets are currently not supported.
|
def get_patient_mhc_haplotype(job, patient_dict):
    """
    Convenience function to get the mhc haplotype from the patient dict

    :param dict patient_dict: dict of patient info
    :return: The MHCI and MHCII haplotypes
    :rtype: toil.fileStore.FileID
    """
    archive_path = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
    extracted_dir = untargz(archive_path, os.getcwd())
    # Re-register each allele list with the file store, keyed by file name.
    return {allele_file: job.fileStore.writeGlobalFile(
                os.path.join(extracted_dir, allele_file))
            for allele_file in ('mhci_alleles.list', 'mhcii_alleles.list')}
|
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
|
def _compute_error(self):
""" Evaluate the absolute error of the Nystroem approximation for each column """
# err_i = sum_j R_{k,ij} A_{k,ji} - d_i
self._err = np.sum(np.multiply(self._R_k, self._C_k.T), axis=0) - self._d
|
Evaluate the absolute error of the Nystroem approximation for each column
|
def aliased_slot_names(self, slot_names: List[SlotDefinitionName]) -> Set[str]:
    """ Return the aliased slot names for all members of the list

    @param slot_names: actual slot names
    @return: aliases w/ duplicates removed
    """
    aliases = set()
    for slot_name in slot_names:
        aliases.add(self.aliased_slot_name(slot_name))
    return aliases
|
Return the aliased slot names for all members of the list
@param slot_names: actual slot names
@return: aliases w/ duplicates removed
|
def read(self):
    '''Read some number of messages'''
    messages = Client.read(self)
    # Redistribute our ready state if necessary before handing back results.
    if self.needs_distribute_ready():
        self.distribute_ready()
    return messages
|
Read some number of messages
|
def associate(self, id_option_vip, id_environment_vip):
    """Create a relationship of OptionVip with EnvironmentVip.

    :param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
    :param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.

    :return: Following dictionary

    ::

        {'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} }

    :raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid.
    :raise OptionVipNotFoundError: Option VIP not registered.
    :raise EnvironmentVipNotFoundError: Environment VIP not registered.
    :raise OptionVipError: Option vip is already associated with the environment vip.
    :raise UserNotAuthorizedError: User does not have authorization to make this association.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate both identifiers before touching the API.
    if not is_valid_int_param(id_option_vip):
        raise InvalidParameterError(
            u'The identifier of Option VIP is invalid or was not informed.')
    if not is_valid_int_param(id_environment_vip):
        raise InvalidParameterError(
            u'The identifier of Environment VIP is invalid or was not informed.')
    uri = 'optionvip/{0}/environmentvip/{1}/'.format(
        id_option_vip, id_environment_vip)
    code, xml = self.submit(None, 'PUT', uri)
    return self.response(code, xml)
|
Create a relationship of OptionVip with EnvironmentVip.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.
:return: Following dictionary
::
{'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} }
:raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise OptionVipError: Option vip is already associated with the environment vip.
:raise UserNotAuthorizedError: User does not have authorization to make this association.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
|
def cleanup(self):
    '''
    remove sockets on shutdown
    '''
    log.debug('ConCache cleaning up')
    # Remove every socket file this cache may have created.
    for sock_path in (self.cache_sock, self.update_sock, self.upd_t_sock):
        if os.path.exists(sock_path):
            os.remove(sock_path)
|
remove sockets on shutdown
|
def validate(self, result, spec):  # noqa Yes, it's too complex.
    """Validate that the result has the correct structure.

    :param result: the value to check.
    :param spec: structure template.  ``None`` matches anything;
        containers are validated recursively against their (single)
        element spec; scalar specs require the matching type.
    :raises ValueError: if the structure of ``result`` does not match
        ``spec``.

    Note: ``bool`` is checked before ``int`` because ``bool`` is a
    subclass of ``int`` — previously a bool result silently satisfied an
    integer spec.
    """
    if spec is None:
        # None matches anything.
        return
    if isinstance(spec, dict):
        if not isinstance(result, dict):
            raise ValueError('Dictionary expected, but %r found.' % result)
        if spec:
            # Validate all values against the spec's (single) value,
            # and all keys against the spec's (single) key.
            spec_value = next(iter(spec.values()))  # Yay Python 3!
            for value in result.values():
                self.validate(value, spec_value)
            spec_key = next(iter(spec.keys()))
            for key in result:
                self.validate(key, spec_key)
    elif isinstance(spec, list):
        if not isinstance(result, list):
            raise ValueError('List expected, but %r found.' % result)
        if spec:
            for value in result:
                self.validate(value, spec[0])
    elif isinstance(spec, tuple):
        if not isinstance(result, tuple):
            raise ValueError('Tuple expected, but %r found.' % result)
        # Tuples are positional: lengths and element specs must match.
        if len(result) != len(spec):
            raise ValueError('Expected %d elements in tuple %r.' %
                             (len(spec), result))
        for s, value in zip(spec, result):
            self.validate(value, s)
    elif isinstance(spec, bool):
        # Must come before the int branch: bool is a subclass of int.
        if not isinstance(result, bool):
            raise ValueError('Boolean expected, but %r found.' % result)
    elif isinstance(spec, int):
        # Exclude bools here so True/False are not accepted as integers.
        if isinstance(result, bool) or not isinstance(result, int):
            raise ValueError('Integer expected, but %r found.' % result)
    elif isinstance(spec, six.string_types):
        if not isinstance(result, six.string_types):
            raise ValueError('String expected, but %r found.' % result)
|
Validate that the result has the correct structure.
|
def sourcehook(self, newfile, encoding='utf-8'):
    """Hook called on a filename to be sourced.

    Strips surrounding double quotes, resolves the path relative to the
    file currently being read, and returns a ``(filename, open file)``
    tuple for the resolved path.
    """
    from codecs import open
    if newfile[0] == '"':
        # Strip the surrounding quotes from a quoted filename.
        newfile = newfile[1:-1]
    # This implements cpp-like semantics for relative-path inclusion.
    # NOTE(review): `basestring` exists only on Python 2 — this block
    # appears to target Python 2; confirm before running under Python 3.
    if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
        newfile = os.path.join(os.path.dirname(self.infile), newfile)
    return (newfile, open(newfile, "r", encoding))
|
Hook called on a filename to be sourced.
|
def setup_logging(fail_silently=False):
    """
    Setup logging configuration

    Finds the most user-facing log config on disk and uses it
    """
    config = None
    configured = False
    paths = list(get_config_paths(filename='logconfig.yml', reversed=True))
    for path in paths:
        if not os.path.exists(path):
            continue
        with open(path, 'rt') as config_file:
            config = yaml.safe_load(config_file.read())
        # Allow the environment to override the configured log levels.
        level = os.environ.get('LOG_LEVEL')
        if level:
            config['root']['level'] = level.upper()
            config['handlers']['console']['level'] = level.upper()
        logging.config.dictConfig(config)
        configured = True
        break
    if not configured and not fail_silently:
        raise LogconfigError('Unable to find logconfig in {}'.format(paths))
    return config
|
Setup logging configuration
Finds the most user-facing log config on disk and uses it
|
def check(self, password: str) -> bool:
    """
    Checks the given password with the one stored
    in the database
    """
    # Accept the stored password hash first; otherwise accept the
    # account API key used as a password.
    if pbkdf2_sha512.verify(password, self.password):
        return True
    return pbkdf2_sha512.verify(
        password, pbkdf2_sha512.encrypt(self.api_key))
|
Checks the given password with the one stored
in the database
|
def add_trial(self, trial):
    """Adds a new trial to this TrialRunner.

    Trials may be added at any time.

    Args:
        trial (Trial): Trial to queue.
    """
    # Propagate the runner's verbosity setting to the trial.
    trial.set_verbose(self._verbose)
    self._trials.append(trial)
    # Notify the scheduling algorithm; warn if its callback is slow.
    with warn_if_slow("scheduler.on_trial_add"):
        self._scheduler_alg.on_trial_add(self, trial)
    # Checkpoint trial metadata (presumably so the run can be restored
    # later — confirm with trial_executor's implementation).
    self.trial_executor.try_checkpoint_metadata(trial)
|
Adds a new trial to this TrialRunner.
Trials may be added at any time.
Args:
trial (Trial): Trial to queue.
|
def summary_plot(
        pymc_obj, name='model', format='png', suffix='-summary', path='./',
        alpha=0.05, chain=None, quartiles=True, hpd=True, rhat=True, main=None,
        xlab=None, x_range=None, custom_labels=None, chain_spacing=0.05, vline_pos=0):
    """
    Model summary plot

    Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
    set of nodes in a given model, or a specified set of nodes.

    :Arguments:
        pymc_obj: PyMC object, trace or array
            A trace from an MCMC sample or a PyMC object with one or more traces.

        name (optional): string
            The name of the object.

        format (optional): string
            Graphic output format (defaults to png).

        suffix (optional): string
            Filename suffix.

        path (optional): string
            Specifies location for saving plots (defaults to local directory).

        alpha (optional): float
            Alpha value for (1-alpha)*100% credible intervals (defaults to 0.05).

        chain (optional): int
            Where there are multiple chains, specify a particular chain to plot.
            If not specified (chain=None), all chains are plotted.

        quartiles (optional): bool
            Flag for plotting the interquartile range, in addition to the
            (1-alpha)*100% intervals (defaults to True).

        hpd (optional): bool
            Flag for plotting the highest probability density (HPD) interval
            instead of the central (1-alpha)*100% interval (defaults to True).

        rhat (optional): bool
            Flag for plotting Gelman-Rubin statistics. Requires 2 or more
            chains (defaults to True).

        main (optional): string
            Title for main plot. Passing False results in titles being
            suppressed; passing None (default) results in default titles.

        xlab (optional): string
            Label for x-axis. Defaults to no label

        x_range (optional): list or tuple
            Range for x-axis. Defaults to matplotlib's best guess.

        custom_labels (optional): list
            User-defined labels for each node. If not provided, the node
            __name__ attributes are used.

        chain_spacing (optional): float
            Plot spacing between chains (defaults to 0.05).

        vline_pos (optional): numeric
            Location of vertical reference line (defaults to 0).

    """
    if not gridspec:
        print_(
            '\nYour installation of matplotlib is not recent enough to support summary_plot; this function is disabled until matplotlib is updated.')
        return
    # Quantiles to be calculated
    quantiles = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
    if quartiles:
        quantiles = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
    # Range for x-axis
    plotrange = None
    # Gridspec
    gs = None
    # Subplots
    interval_plot = None
    rhat_plot = None
    # Resolve the traces to plot from whichever object type was passed in.
    try:
        # First try Model type
        vars = pymc_obj._variables_to_tally
    except AttributeError:
        try:
            # Try a database object
            vars = pymc_obj._traces
        except AttributeError:
            if isinstance(pymc_obj, Variable):
                vars = [pymc_obj]
            else:
                # Assume an iterable
                vars = pymc_obj
    from .diagnostics import gelman_rubin
    # Calculate G-R diagnostics
    if rhat:
        try:
            R = {}
            for variable in vars:
                R[variable.__name__] = gelman_rubin(variable)
        except (ValueError, TypeError):
            print(
                'Could not calculate Gelman-Rubin statistics. Requires multiple chains of equal length.')
            rhat = False
    # Empty list for y-axis labels
    labels = []
    # Counter for current variable
    var = 1
    # Make sure there is something to print
    if all([v._plot == False for v in vars]):
        print_('No variables to plot')
        return
    for variable in vars:
        # If plot flag is off, do not print
        if variable._plot == False:
            continue
        # Extract name
        varname = variable.__name__
        # Retrieve trace(s)
        if chain is not None:
            chains = 1
            traces = [variable.trace(chain=chain)]
        else:
            chains = variable.trace.db.chains
            traces = [variable.trace(chain=i) for i in range(chains)]
        if gs is None:
            # Initialize plot
            if rhat and chains > 1:
                gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
            else:
                gs = gridspec.GridSpec(1, 1)
            # Subplot for confidence intervals
            interval_plot = subplot(gs[0])
        # Get quantiles
        data = [calc_quantiles(d, quantiles) for d in traces]
        if hpd:
            # Substitute HPD interval for the outer quantiles
            for i, d in enumerate(traces):
                hpd_interval = calc_hpd(d, alpha)
                data[i][quantiles[0]] = hpd_interval[0]
                data[i][quantiles[-1]] = hpd_interval[1]
        data = [[d[q] for q in quantiles] for d in data]
        # Ensure x-axis contains range of current interval
        if plotrange:
            plotrange = [min(
                plotrange[0],
                nmin(data)),
                max(plotrange[1],
                    nmax(data))]
        else:
            plotrange = [nmin(data), nmax(data)]
        try:
            # First try missing-value stochastic
            value = variable.get_stoch_value()
        except AttributeError:
            # All other variable types
            value = variable.value
        # Number of elements in current variable
        k = size(value)
        # Append variable name(s) to list
        if k > 1:
            names = var_str(varname, shape(value)[int(shape(value)[0]==1):])
            labels += names
        else:
            labels.append(varname)
            # labels.append('\n'.join(varname.split('_')))
        # Add spacing for each chain, if more than one: alternating
        # positive/negative jitter around the variable's y position.
        e = [0] + [(chain_spacing * ((i + 2) / 2)) * (
            -1) ** i for i in range(chains - 1)]
        # Loop over chains
        for j, quants in enumerate(data):
            # Deal with multivariate nodes
            if k > 1:
                ravelled_quants = list(map(ravel, quants))
                for i, quant in enumerate(transpose(ravelled_quants)):
                    q = ravel(quant)
                    # Y coordinate with jitter
                    y = -(var + i) + e[j]
                    if quartiles:
                        # Plot median
                        pyplot(q[2], y, 'bo', markersize=4)
                        # Plot quartile interval
                        errorbar(
                            x=(q[1],
                                q[3]),
                            y=(y,
                                y),
                            linewidth=2,
                            color="blue")
                    else:
                        # Plot median
                        pyplot(q[1], y, 'bo', markersize=4)
                    # Plot outer interval
                    errorbar(
                        x=(q[0],
                            q[-1]),
                        y=(y,
                            y),
                        linewidth=1,
                        color="blue")
            else:
                # Y coordinate with jitter
                y = -var + e[j]
                if quartiles:
                    # Plot median
                    pyplot(quants[2], y, 'bo', markersize=4)
                    # Plot quartile interval
                    errorbar(
                        x=(quants[1],
                            quants[3]),
                        y=(y,
                            y),
                        linewidth=2,
                        color="blue")
                else:
                    # Plot median
                    pyplot(quants[1], y, 'bo', markersize=4)
                # Plot outer interval
                errorbar(
                    x=(quants[0],
                        quants[-1]),
                    y=(y,
                        y),
                    linewidth=1,
                    color="blue")
        # Increment index
        var += k
    if custom_labels is not None:
        labels = custom_labels
    # Update margins
    left_margin = max([len(x) for x in labels]) * 0.015
    gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
    # Define range of y-axis
    ylim(-var + 0.5, -0.5)
    datarange = plotrange[1] - plotrange[0]
    xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
    # Add variable labels
    yticks([-(l + 1) for l in range(len(labels))], labels)
    # Add title
    if main is not False:
        plot_title = main or str(int((
            1 - alpha) * 100)) + "% Credible Intervals"
        title(plot_title)
    # Add x-axis label
    if xlab is not None:
        xlabel(xlab)
    # Constrain to specified range
    if x_range is not None:
        xlim(*x_range)
    # Remove ticklines on y-axes
    for ticks in interval_plot.yaxis.get_major_ticks():
        ticks.tick1On = False
        ticks.tick2On = False
    for loc, spine in six.iteritems(interval_plot.spines):
        if loc in ['bottom', 'top']:
            pass
            # spine.set_position(('outward',10)) # outward by 10 points
        elif loc in ['left', 'right']:
            spine.set_color('none')  # don't draw spine
    # Reference line
    axvline(vline_pos, color='k', linestyle='--')
    # Generate Gelman-Rubin plot
    if rhat and chains > 1:
        # If there are multiple chains, calculate R-hat
        rhat_plot = subplot(gs[1])
        if main is not False:
            title("R-hat")
        # Set x range
        xlim(0.9, 2.1)
        # X axis labels
        xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
        yticks([-(l + 1) for l in range(len(labels))], "")
        i = 1
        for variable in vars:
            if variable._plot == False:
                continue
            # Extract name
            varname = variable.__name__
            try:
                value = variable.get_stoch_value()
            except AttributeError:
                value = variable.value
            k = size(value)
            # Values above 2 are clamped into the "2+" bin.
            if k > 1:
                pyplot([min(r, 2) for r in R[varname]], [-(j + i)
                                                         for j in range(k)], 'bo', markersize=4)
            else:
                pyplot(min(R[varname], 2), -i, 'bo', markersize=4)
            i += k
        # Define range of y-axis
        ylim(-i + 0.5, -0.5)
        # Remove ticklines on y-axes
        for ticks in rhat_plot.yaxis.get_major_ticks():
            ticks.tick1On = False
            ticks.tick2On = False
        for loc, spine in six.iteritems(rhat_plot.spines):
            if loc in ['bottom', 'top']:
                pass
                # spine.set_position(('outward',10)) # outward by 10 points
            elif loc in ['left', 'right']:
                spine.set_color('none')  # don't draw spine
    savefig("%s%s%s.%s" % (path, name, suffix, format))
|
Model summary plot
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
set of nodes in a given model, or a specified set of nodes.
:Arguments:
pymc_obj: PyMC object, trace or array
A trace from an MCMC sample or a PyMC object with one or more traces.
name (optional): string
The name of the object.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to 0.05).
chain (optional): int
Where there are multiple chains, specify a particular chain to plot.
If not specified (chain=None), all chains are plotted.
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
hpd (optional): bool
Flag for plotting the highest probability density (HPD) interval
instead of the central (1-alpha)*100% interval (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
        suppressed; passing None (default) results in default titles.
xlab (optional): string
Label for x-axis. Defaults to no label
x_range (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
custom_labels (optional): list
User-defined labels for each node. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline_pos (optional): numeric
Location of vertical reference line (defaults to 0).
|
def calibrate_counts(array, attributes, index):
    """Calibration for counts channels."""
    # Per-channel offset and scale, taken from the attribute tables.
    counts_offset = np.float32(attributes["corrected_counts_offsets"][index])
    counts_scale = np.float32(attributes["corrected_counts_scales"][index])
    return (array - counts_offset) * counts_scale
|
Calibration for counts channels.
|
def addresses(self):
    """
    Access the addresses

    :returns: twilio.rest.api.v2010.account.address.AddressList
    :rtype: twilio.rest.api.v2010.account.address.AddressList
    """
    # Lazily build the list proxy on first access, then reuse it.
    if self._addresses is None:
        self._addresses = AddressList(
            self._version,
            account_sid=self._solution['sid'],
        )
    return self._addresses
|
Access the addresses
:returns: twilio.rest.api.v2010.account.address.AddressList
:rtype: twilio.rest.api.v2010.account.address.AddressList
|
def unpickle(pickle_file):
    """Unpickle a python object from the given path.

    :param pickle_file: path to a file written with ``dill``.
    :return: the deserialized object (may be any value, including None).
    """
    # Renamed the local from `pickle` to `obj`: the old name shadowed the
    # stdlib `pickle` module.
    with open(pickle_file, "rb") as pickle_f:
        obj = dill.load(pickle_f)
    # Bug fix: the old `if not obj:` check logged a spurious load failure
    # for legitimately falsy payloads (0, [], {}, ""); only None is
    # treated as a failed load now.
    if obj is None:
        LOG.error("Could not load python object from file")
    return obj
|
Unpickle a python object from the given path.
|
def interactions_iter(self, nbunch=None, t=None):
    """Return an iterator over the interaction present in a given snapshot.

    Edges are returned as tuples
    in the order (node, neighbor).

    Parameters
    ----------
    nbunch : iterable container, optional (default= all nodes)
        A container of nodes.  The container will be iterated
        through once.
    t : snapshot id (default=None)
        If None the method returns an iterator over the edges of the
        flattened graph.

    Returns
    -------
    edge_iter : iterator
        An iterator of (u,v) tuples of interaction.

    See Also
    --------
    interaction : return a list of interaction

    Notes
    -----
    Nodes in nbunch that are not in the graph will be (quietly) ignored.
    For directed graphs this returns the out-interaction.

    Examples
    --------
    >>> G = dn.DynGraph()
    >>> G.add_path([0,1,2], 0)
    >>> G.add_interaction(2,3,1)
    >>> [e for e in G.interactions_iter(t=0)]
    [(0, 1), (1, 2)]
    >>> list(G.interactions_iter())
    [(0, 1), (1, 2), (2, 3)]
    """
    seen = {}  # helper dict to keep track of multiply stored interactions
    if nbunch is None:
        nodes_nbrs = self._adj.items()
    else:
        nodes_nbrs = ((n, self._adj[n]) for n in self.nbunch_iter(nbunch))
    for n, nbrs in nodes_nbrs:
        for nbr in nbrs:
            if t is not None:
                # Only yield the edge when it is present at snapshot t.
                if nbr not in seen and self.__presence_test(n, nbr, t):
                    yield (n, nbr, {"t": [t]})
            else:
                if nbr not in seen:
                    yield (n, nbr, self._adj[n][nbr])
        # Mark n as fully visited so the reciprocal (nbr, n) entry is not
        # reported a second time.
        seen[n] = 1
    del seen
|
Return an iterator over the interaction present in a given snapshot.
Edges are returned as tuples
in the order (node, neighbor).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
t : snapshot id (default=None)
        If None the method returns an iterator over the edges of the flattened graph.
Returns
-------
edge_iter : iterator
An iterator of (u,v) tuples of interaction.
See Also
--------
interaction : return a list of interaction
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-interaction.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2], 0)
>>> G.add_interaction(2,3,1)
>>> [e for e in G.interactions_iter(t=0)]
[(0, 1), (1, 2)]
>>> list(G.interactions_iter())
[(0, 1), (1, 2), (2, 3)]
|
def appendMissingSignatures(self):
    """ Store which accounts/keys are supposed to sign the transaction

    This method is used for an offline-signer!
    """
    # For each public key still missing a signature, look up its private
    # key in the wallet and append the WIF if available.
    for pubkey in self.get("missing_signatures", []):
        wif = self.blockchain.wallet.getPrivateKeyForPublicKey(pubkey)
        if wif:
            self.appendWif(wif)
|
Store which accounts/keys are supposed to sign the transaction
This method is used for an offline-signer!
|
def select_good_pixel_region(hits, col_span, row_span, min_cut_threshold=0.2, max_cut_threshold=2.0):
    '''Takes the hit array and masks all pixels with a certain occupancy.

    Parameters
    ----------
    hits : array like
        If dim > 2 the additional dimensions are summed up.
    col_span, row_span : iterable of int
        Column / row index ranges; only pixels inside
        [min(span), max(span)] (inclusive) remain unmasked.
    min_cut_threshold : float
        A number to specify the minimum threshold, which pixel to take. Pixels are masked if
        occupancy < min_cut_threshold * np.ma.median(occupancy)
        0 means that no pixels are masked
    max_cut_threshold : float
        A number to specify the maximum threshold, which pixel to take. Pixels are masked if
        occupancy > max_cut_threshold * np.ma.median(occupancy)
        Can be set to None that no pixels are masked by max_cut_threshold

    Returns
    -------
    numpy.ma.array, shape=(80,336)
        The hits array with masked pixels.
    '''
    # NOTE(review): despite the docstring, only the LAST axis is summed
    # (axis=-1); extra leading axes of a >3-dim input are left intact — confirm.
    hits = np.sum(hits, axis=(-1)).astype('u8')
    # Hard-coded 80x336 pixel matrix; mask value 1 = pixel masked.
    mask = np.ones(shape=(80, 336), dtype=np.uint8)
    # Unmask only the requested column/row window.
    mask[min(col_span):max(col_span) + 1, min(row_span):max(row_span) + 1] = 0
    ma = np.ma.masked_where(mask, hits)
    # Thresholds are relative to the median occupancy of the unmasked window.
    if max_cut_threshold is not None:
        return np.ma.masked_where(np.logical_or(ma < min_cut_threshold * np.ma.median(ma), ma > max_cut_threshold * np.ma.median(ma)), ma)
    else:
        return np.ma.masked_where(ma < min_cut_threshold * np.ma.median(ma), ma)
|
Takes the hit array and masks all pixels with a certain occupancy.
Parameters
----------
hits : array like
If dim > 2 the additional dimensions are summed up.
min_cut_threshold : float
A number to specify the minimum threshold, which pixel to take. Pixels are masked if
occupancy < min_cut_threshold * np.ma.median(occupancy)
0 means that no pixels are masked
max_cut_threshold : float
A number to specify the maximum threshold, which pixel to take. Pixels are masked if
occupancy > max_cut_threshold * np.ma.median(occupancy)
Can be set to None that no pixels are masked by max_cut_threshold
Returns
-------
numpy.ma.array, shape=(80,336)
The hits array with masked pixels.
|
def get_default(self, ctx):
    """Given a context variable this calculates the default value."""
    # A callable default is invoked lazily; anything else is used as-is.
    default = self.default
    value = default() if callable(default) else default
    return self.type_cast_value(ctx, value)
|
Given a context variable this calculates the default value.
|
def cleanup_sweep_threads():
    '''
    Not used. Keeping this function in case we decide not to use
    daemonized threads and it becomes necessary to clean up the
    running threads upon exit.
    '''
    # Scan module globals for TimedDict instances and stop their sweepers.
    timed_dicts = [(name, value) for name, value in globals().items()
                   if isinstance(value, (TimedDict,))]
    for dict_name, obj in timed_dicts:
        logging.info(
            'Stopping thread for TimedDict {dict_name}'.format(
                dict_name=dict_name))
        obj.stop_sweep()
|
Not used. Keeping this function in case we decide not to use
daemonized threads and it becomes necessary to clean up the
running threads upon exit.
|
def explain_feature(featurename):
    '''print the location of single feature and its version

    if the feature is located inside a git repository,
    this will also print the git-rev and modified files
    '''
    import os
    import featuremonkey
    import importlib
    import subprocess

    def guess_version(feature_module):
        # Prefer an explicit __version__ attribute, then a get_version() hook.
        if hasattr(feature_module, '__version__'):
            return feature_module.__version__
        if hasattr(feature_module, 'get_version'):
            return feature_module.get_version()
        return ('unable to determine version:'
                ' please add __version__ or get_version()'
                ' to this feature module!')

    def git_rev(module):
        # Current HEAD commit of the repository containing the module,
        # or '-' when the module is not inside a git repository.
        stdout, stderr = subprocess.Popen(
            ["git", "rev-parse", "HEAD"],
            cwd=os.path.dirname(module.__file__),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        ).communicate()
        # NOTE(review): communicate() returns bytes on Python 3, so the
        # substring test assumes Python 2 str output — confirm target version.
        if 'Not a git repo' in stderr:
            return '-'
        else:
            return stdout.strip()

    def git_changes(module):
        # Names of files with uncommitted modifications, or '-' when clean.
        stdout = subprocess.Popen(
            ["git", "diff", "--name-only"],
            cwd=os.path.dirname(module.__file__),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        ).communicate()[0]
        return stdout.strip() or '-'

    if featurename in featuremonkey.get_features_from_equation_file(os.environ['PRODUCT_EQUATION_FILENAME']):
        print()
        print(featurename)
        print('-' * 60)
        print()
        is_subfeature = '.features.' in featurename
        try:
            feature_module = importlib.import_module(featurename)
        except ImportError:
            print('Error: unable to import feature "%s"' % featurename)
            # Bug fix: bail out here. Previously execution fell through and
            # dereferenced the unbound `feature_module`, raising NameError.
            return
        print('Location: %s' % os.path.dirname(feature_module.__file__))
        print()
        if is_subfeature:
            print('Version: see parent feature')
            print()
        else:
            print('Version: %s' % str(guess_version(feature_module)))
            print()
        print('git: %s' % git_rev(feature_module))
        print()
        print('git changed: %s' % '\n\t\t'.join(git_changes(feature_module).split('\n')))
    else:
        print('No feature named ' + featurename)
|
    print the location of a single feature and its version
if the feature is located inside a git repository,
this will also print the git-rev and modified files
|
def update(self, status=values.unset, announce_url=values.unset,
           announce_method=values.unset):
    """
    Update the ConferenceInstance

    :param ConferenceInstance.UpdateStatus status: The new status of the resource
    :param unicode announce_url: The URL we should call to announce something into the conference
    :param unicode announce_method: The HTTP method used to call announce_url
    :returns: Updated ConferenceInstance
    :rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
    """
    # values.of() filters out unset entries so only explicitly supplied
    # fields are included in the POST body.
    data = values.of({'Status': status, 'AnnounceUrl': announce_url, 'AnnounceMethod': announce_method, })
    payload = self._version.update(
        'POST',
        self._uri,
        data=data,
    )
    # Wrap the API response in a fresh instance, carrying over the
    # account_sid/sid that identify this conference resource.
    return ConferenceInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        sid=self._solution['sid'],
    )
|
Update the ConferenceInstance
:param ConferenceInstance.UpdateStatus status: The new status of the resource
:param unicode announce_url: The URL we should call to announce something into the conference
:param unicode announce_method: The HTTP method used to call announce_url
:returns: Updated ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
|
def fix_e224(self, result):
    """Remove extraneous whitespace around operator."""
    # pycodestyle reports 1-based line/column; convert to 0-based indices.
    row = result['line'] - 1
    col = result['column'] - 1
    line = self.source[row]
    # Replace tabs with single spaces from the reported column onward,
    # leaving everything before the offending operator untouched.
    self.source[row] = line[:col] + line[col:].replace('\t', ' ')
|
Remove extraneous whitespace around operator.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.