code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def atlasdb_cache_zonefile_info( con=None, path=None ):
"""
Load up and cache our zonefile inventory from the database
"""
global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
inv = None
with ZONEFILE_INV_LOCK:
inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
ZONEFILE_INV = inv
NUM_ZONEFILES = inv_len
    return inv | Load up and cache our zonefile inventory from the database | Below is the instruction that describes the task:
### Input:
Load up and cache our zonefile inventory from the database
### Response:
def atlasdb_cache_zonefile_info( con=None, path=None ):
    """
    Load up and cache our zonefile inventory from the database.

    Refreshes the module-level ZONEFILE_INV / NUM_ZONEFILES cache and
    returns the freshly built inventory.
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
    # Rebuild the inventory under the lock so concurrent readers never
    # observe a half-updated (inventory, count) pair.
    with ZONEFILE_INV_LOCK:
        zonefile_count = atlasdb_zonefile_inv_length( con=con, path=path )
        inventory = atlas_make_zonefile_inventory( 0, zonefile_count, con=con, path=path )
        ZONEFILE_INV = inventory
        NUM_ZONEFILES = zonefile_count
    return inventory
def _OpenFileObject(self, path_spec):
"""Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyqcow.file: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
qcow_file = pyqcow.file()
qcow_file.open_file_object(file_object)
return qcow_file | Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyqcow.file: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect. | Below is the the instruction that describes the task:
### Input:
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyqcow.file: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
### Response:
def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.
    Args:
      path_spec (PathSpec): path specification.
    Returns:
      pyqcow.file: a QCOW image handle layered on the parent file object.
    Raises:
      PathSpecError: if the path specification is incorrect.
    """
    # A QCOW image always lives inside some parent container (raw file,
    # another image, ...), so a parent-less path spec is an error.
    if not path_spec.HasParent():
      raise errors.PathSpecError(
          'Unsupported path specification without parent.')
    parent_file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)
    image_file = pyqcow.file()
    image_file.open_file_object(parent_file_object)
    return image_file
def create_device(self, layout):
"""Creates a uinput device using the specified layout."""
events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],
ecodes.EV_REL: []}
# Joystick device
if layout.axes or layout.buttons or layout.hats:
self.joystick_dev = next_joystick_device()
for name in layout.axes:
params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)
if not absInfoUsesValue:
params = params[1:]
events[ecodes.EV_ABS].append((name, params))
for name in layout.hats:
params = (0, -1, 1, 0, 0)
if not absInfoUsesValue:
params = params[1:]
events[ecodes.EV_ABS].append((name, params))
for name in layout.buttons:
events[ecodes.EV_KEY].append(name)
if layout.mouse:
self.mouse_pos = {}
self.mouse_rel = {}
self.mouse_analog_sensitivity = float(
layout.mouse_options.get("MOUSE_SENSITIVITY",
DEFAULT_MOUSE_SENSITIVTY)
)
self.mouse_analog_deadzone = int(
layout.mouse_options.get("MOUSE_DEADZONE",
DEFAULT_MOUSE_DEADZONE)
)
self.scroll_repeat_delay = float(
layout.mouse_options.get("MOUSE_SCROLL_REPEAT_DELAY",
DEFAULT_SCROLL_REPEAT_DELAY)
)
self.scroll_delay = float(
layout.mouse_options.get("MOUSE_SCROLL_DELAY",
DEFAULT_SCROLL_DELAY)
)
for name in layout.mouse:
if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):
if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:
# This ensures that scroll wheel events can work
events[ecodes.EV_REL].append(ecodes.REL_WHEEL)
else:
events[ecodes.EV_REL].append(name)
self.mouse_rel[name] = 0.0
self.device = UInput(name=layout.name, events=events,
bustype=layout.bustype, vendor=layout.vendor,
product=layout.product, version=layout.version)
self.layout = layout | Creates a uinput device using the specified layout. | Below is the the instruction that describes the task:
### Input:
Creates a uinput device using the specified layout.
### Response:
def create_device(self, layout):
    """Creates a uinput device using the specified layout.

    Builds the evdev capability table (EV_ABS / EV_KEY / EV_REL) from the
    layout's axes, hats, buttons and mouse event names, opens the uinput
    device, and remembers the layout on ``self.layout``.
    """
    events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],
              ecodes.EV_REL: []}
    # Joystick device
    if layout.axes or layout.buttons or layout.hats:
        # Reserve the next free joystick device node for this layout.
        self.joystick_dev = next_joystick_device()
    for name in layout.axes:
        params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)
        # Some python-evdev versions omit the leading "value" field of
        # AbsInfo; drop it so the tuple matches -- TODO confirm against
        # the installed evdev version.
        if not absInfoUsesValue:
            params = params[1:]
        events[ecodes.EV_ABS].append((name, params))
    for name in layout.hats:
        # Hats are tri-state axes: value 0, min -1, max 1, no fuzz/flat.
        params = (0, -1, 1, 0, 0)
        if not absInfoUsesValue:
            params = params[1:]
        events[ecodes.EV_ABS].append((name, params))
    for name in layout.buttons:
        events[ecodes.EV_KEY].append(name)
    if layout.mouse:
        self.mouse_pos = {}
        self.mouse_rel = {}
        # NOTE: DEFAULT_MOUSE_SENSITIVTY is spelled without the second
        # "I" at its definition site; keep the name in sync if renamed.
        self.mouse_analog_sensitivity = float(
            layout.mouse_options.get("MOUSE_SENSITIVITY",
                                     DEFAULT_MOUSE_SENSITIVTY)
        )
        self.mouse_analog_deadzone = int(
            layout.mouse_options.get("MOUSE_DEADZONE",
                                     DEFAULT_MOUSE_DEADZONE)
        )
        self.scroll_repeat_delay = float(
            layout.mouse_options.get("MOUSE_SCROLL_REPEAT_DELAY",
                                     DEFAULT_SCROLL_REPEAT_DELAY)
        )
        self.scroll_delay = float(
            layout.mouse_options.get("MOUSE_SCROLL_DELAY",
                                     DEFAULT_SCROLL_DELAY)
        )
        for name in layout.mouse:
            if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):
                if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:
                    # This ensures that scroll wheel events can work
                    events[ecodes.EV_REL].append(ecodes.REL_WHEEL)
            else:
                # NOTE(review): wheel names never get a mouse_rel slot;
                # presumably scrolling is accumulated elsewhere -- verify.
                events[ecodes.EV_REL].append(name)
                self.mouse_rel[name] = 0.0
    self.device = UInput(name=layout.name, events=events,
                         bustype=layout.bustype, vendor=layout.vendor,
                         product=layout.product, version=layout.version)
    self.layout = layout |
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response) | Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010) | Below is the the instruction that describes the task:
### Input:
Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
### Response:
def get_series(self, *args, **kwargs):
    """Fetches lists of series.
    get /v1/public/series
    :returns: SeriesDataWrapper
    >>> #Find all the series that involved Wolverine
    >>> #wolverine's id: 1009718
    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_series(characters="1009718")
    >>> print response.data.total
    435
    >>> series = response.data.results
    >>> print series[0].title
    5 Ronin (2010)
    """
    # Sign and send the request built from the keyword filters, then wrap
    # the decoded JSON payload in a SeriesDataWrapper.
    response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
    return SeriesDataWrapper(self, response) |
def _reset_env(self, env: BaseUnityEnvironment):
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
if self.meta_curriculum is not None:
return env.reset(train_mode=self.fast_simulation, config=self.meta_curriculum.get_config())
else:
return env.reset(train_mode=self.fast_simulation) | Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment. | Below is the the instruction that describes the task:
### Input:
Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
### Response:
def _reset_env(self, env: BaseUnityEnvironment):
    """Resets the environment.
    Returns:
        A Data structure corresponding to the initial reset state of the
        environment.
    """
    # Only forward a curriculum-derived config when a meta-curriculum is
    # active; otherwise reset with the training-speed flag alone.
    reset_params = {"train_mode": self.fast_simulation}
    if self.meta_curriculum is not None:
        reset_params["config"] = self.meta_curriculum.get_config()
    return env.reset(**reset_params)
def cancel_download_task(self, task_id, expires=None, **kwargs):
"""取消离线下载任务.
:param task_id: 要取消的任务ID号。
:type task_id: str
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
"""
data = {
'expires': expires,
'task_id': task_id,
}
return self._request('services/cloud_dl', 'cancle_task',
data=data, **kwargs) | 取消离线下载任务.
:param task_id: 要取消的任务ID号。
:type task_id: str
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象 | Below is the the instruction that describes the task:
### Input:
取消离线下载任务.
:param task_id: 要取消的任务ID号。
:type task_id: str
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: Response 对象
### Response:
def cancel_download_task(self, task_id, expires=None, **kwargs):
    """Cancel an offline-download task.

    :param task_id: ID of the task to cancel.
    :type task_id: str
    :param expires: request expiry timestamp; validated server-side when set.
    :type expires: int
    :return: Response object
    """
    payload = {
        'task_id': task_id,
        'expires': expires,
    }
    # NB: 'cancle_task' is the (misspelled) method name the remote API expects.
    return self._request('services/cloud_dl', 'cancle_task',
                         data=payload, **kwargs)
def ungrant_role(self, principal, role, object=None):
"""Ungrant `role` to `user` (either globally, if `object` is None, or
on the specific `object`)."""
assert principal
principal = unwrap(principal)
session = object_session(object) if object is not None else db.session
manager = self._current_user_manager(session=session)
args = {
"role": role,
"object": object,
"anonymous": False,
"user": None,
"group": None,
}
query = session.query(RoleAssignment)
query = query.filter(
RoleAssignment.role == role, RoleAssignment.object == object
)
if principal is AnonymousRole or (
hasattr(principal, "is_anonymous") and principal.is_anonymous
):
args["anonymous"] = True
query.filter(
RoleAssignment.anonymous == False,
RoleAssignment.user == None,
RoleAssignment.group == None,
)
elif isinstance(principal, User):
args["user"] = principal
query = query.filter(RoleAssignment.user == principal)
else:
args["group"] = principal
query = query.filter(RoleAssignment.group == principal)
ra = query.one()
session.delete(ra)
audit = SecurityAudit(manager=manager, op=SecurityAudit.REVOKE, **args)
session.add(audit)
self._needs_flush()
self._clear_role_cache(principal) | Ungrant `role` to `user` (either globally, if `object` is None, or
on the specific `object`). | Below is the the instruction that describes the task:
### Input:
Ungrant `role` to `user` (either globally, if `object` is None, or
on the specific `object`).
### Response:
def ungrant_role(self, principal, role, object=None):
    """Ungrant `role` to `user` (either globally, if `object` is None, or
    on the specific `object`).

    :param principal: user, group, or the anonymous principal to revoke from.
    :param role: the role to revoke.
    :param object: optional domain object the assignment is scoped to.
    :raises sqlalchemy.orm.exc.NoResultFound: if no matching assignment exists.
    """
    assert principal
    principal = unwrap(principal)
    session = object_session(object) if object is not None else db.session
    manager = self._current_user_manager(session=session)
    # Audit-log arguments mirroring the assignment being removed.
    args = {
        "role": role,
        "object": object,
        "anonymous": False,
        "user": None,
        "group": None,
    }
    query = session.query(RoleAssignment)
    query = query.filter(
        RoleAssignment.role == role, RoleAssignment.object == object
    )
    if principal is AnonymousRole or (
        hasattr(principal, "is_anonymous") and principal.is_anonymous
    ):
        args["anonymous"] = True
        # BUG FIX: the filtered query was previously discarded (the result
        # of query.filter() was never assigned), and the filter matched
        # anonymous == False rows. Match the anonymous assignment (which
        # is stored with anonymous=True and no user/group) explicitly.
        query = query.filter(
            RoleAssignment.anonymous == True,
            RoleAssignment.user == None,
            RoleAssignment.group == None,
        )
    elif isinstance(principal, User):
        args["user"] = principal
        query = query.filter(RoleAssignment.user == principal)
    else:
        args["group"] = principal
        query = query.filter(RoleAssignment.group == principal)
    ra = query.one()
    session.delete(ra)
    audit = SecurityAudit(manager=manager, op=SecurityAudit.REVOKE, **args)
    session.add(audit)
    self._needs_flush()
    self._clear_role_cache(principal)
def filename_from_url(url):
""":return: detected filename or None"""
fname = os.path.basename(urlparse.urlparse(url).path)
if len(fname.strip(" \n\t.")) == 0:
return None
return fname | :return: detected filename or None | Below is the the instruction that describes the task:
### Input:
:return: detected filename or None
### Response:
def filename_from_url(url):
    """:return: detected filename or None"""
    candidate = os.path.basename(urlparse.urlparse(url).path)
    # A basename made up only of whitespace and dots (e.g. "", "..")
    # carries no usable filename.
    return candidate if candidate.strip(" \n\t.") else None
def _new_packet_cb(self, packet):
"""Callback for newly arrived packets with TOC information"""
chan = packet.channel
cmd = packet.data[0]
payload = packet.data[1:]
if (chan == CHAN_SETTINGS):
id = payload[0]
error_status = payload[1]
block = self._find_block(id)
if cmd == CMD_CREATE_BLOCK or cmd == CMD_CREATE_BLOCK_V2:
if (block is not None):
if error_status == 0 or error_status == errno.EEXIST:
if not block.added:
logger.debug('Have successfully added id=%d', id)
pk = CRTPPacket()
pk.set_header(5, CHAN_SETTINGS)
pk.data = (CMD_START_LOGGING, id, block.period)
self.cf.send_packet(pk, expected_reply=(
CMD_START_LOGGING, id))
block.added = True
else:
msg = self._err_codes[error_status]
logger.warning('Error %d when adding id=%d (%s)',
error_status, id, msg)
block.err_no = error_status
block.added_cb.call(False)
block.error_cb.call(block, msg)
else:
logger.warning('No LogEntry to assign block to !!!')
if (cmd == CMD_START_LOGGING):
if (error_status == 0x00):
logger.info('Have successfully started logging for id=%d',
id)
if block:
block.started = True
else:
msg = self._err_codes[error_status]
logger.warning('Error %d when starting id=%d (%s)',
error_status, id, msg)
if block:
block.err_no = error_status
block.started_cb.call(self, False)
# This is a temporary fix, we are adding a new issue
# for this. For some reason we get an error back after
# the block has been started and added. This will show
# an error in the UI, but everything is still working.
# block.error_cb.call(block, msg)
if (cmd == CMD_STOP_LOGGING):
if (error_status == 0x00):
logger.info('Have successfully stopped logging for id=%d',
id)
if block:
block.started = False
if (cmd == CMD_DELETE_BLOCK):
# Accept deletion of a block that isn't added. This could
# happen due to timing (i.e add/start/delete in fast sequence)
if error_status == 0x00 or error_status == errno.ENOENT:
logger.info('Have successfully deleted id=%d', id)
if block:
block.started = False
block.added = False
if (cmd == CMD_RESET_LOGGING):
# Guard against multiple responses due to re-sending
if not self.toc:
logger.debug('Logging reset, continue with TOC download')
self.log_blocks = []
self.toc = Toc()
toc_fetcher = TocFetcher(self.cf, LogTocElement,
CRTPPort.LOGGING,
self.toc, self._refresh_callback,
self._toc_cache)
toc_fetcher.start()
if (chan == CHAN_LOGDATA):
chan = packet.channel
id = packet.data[0]
block = self._find_block(id)
timestamps = struct.unpack('<BBB', packet.data[1:4])
timestamp = (
timestamps[0] | timestamps[1] << 8 | timestamps[2] << 16)
logdata = packet.data[4:]
if (block is not None):
block.unpack_log_data(logdata, timestamp)
else:
logger.warning('Error no LogEntry to handle id=%d', id) | Callback for newly arrived packets with TOC information | Below is the the instruction that describes the task:
### Input:
Callback for newly arrived packets with TOC information
### Response:
def _new_packet_cb(self, packet):
    """Callback for newly arrived packets with TOC information.

    Dispatches on the CRTP channel: CHAN_SETTINGS packets acknowledge
    block create/start/stop/delete/reset commands, while CHAN_LOGDATA
    packets carry timestamped log samples for an existing block.
    """
    chan = packet.channel
    cmd = packet.data[0]
    payload = packet.data[1:]
    if (chan == CHAN_SETTINGS):
        # Settings replies: payload is (block id, error status).
        id = payload[0]
        error_status = payload[1]
        block = self._find_block(id)
        if cmd == CMD_CREATE_BLOCK or cmd == CMD_CREATE_BLOCK_V2:
            if (block is not None):
                # EEXIST is benign: the block already lives on the copter.
                if error_status == 0 or error_status == errno.EEXIST:
                    if not block.added:
                        logger.debug('Have successfully added id=%d', id)
                        # Creation succeeded; immediately ask the copter
                        # to start streaming this block at its period.
                        pk = CRTPPacket()
                        pk.set_header(5, CHAN_SETTINGS)
                        pk.data = (CMD_START_LOGGING, id, block.period)
                        self.cf.send_packet(pk, expected_reply=(
                            CMD_START_LOGGING, id))
                        block.added = True
                else:
                    msg = self._err_codes[error_status]
                    logger.warning('Error %d when adding id=%d (%s)',
                                   error_status, id, msg)
                    block.err_no = error_status
                    block.added_cb.call(False)
                    block.error_cb.call(block, msg)
            else:
                logger.warning('No LogEntry to assign block to !!!')
        if (cmd == CMD_START_LOGGING):
            if (error_status == 0x00):
                logger.info('Have successfully started logging for id=%d',
                            id)
                if block:
                    block.started = True
            else:
                msg = self._err_codes[error_status]
                logger.warning('Error %d when starting id=%d (%s)',
                               error_status, id, msg)
                if block:
                    block.err_no = error_status
                    block.started_cb.call(self, False)
                    # This is a temporary fix, we are adding a new issue
                    # for this. For some reason we get an error back after
                    # the block has been started and added. This will show
                    # an error in the UI, but everything is still working.
                    # block.error_cb.call(block, msg)
        if (cmd == CMD_STOP_LOGGING):
            if (error_status == 0x00):
                logger.info('Have successfully stopped logging for id=%d',
                            id)
                if block:
                    block.started = False
        if (cmd == CMD_DELETE_BLOCK):
            # Accept deletion of a block that isn't added. This could
            # happen due to timing (i.e add/start/delete in fast sequence)
            if error_status == 0x00 or error_status == errno.ENOENT:
                logger.info('Have successfully deleted id=%d', id)
                if block:
                    block.started = False
                    block.added = False
        if (cmd == CMD_RESET_LOGGING):
            # Guard against multiple responses due to re-sending
            if not self.toc:
                logger.debug('Logging reset, continue with TOC download')
                self.log_blocks = []
                self.toc = Toc()
                toc_fetcher = TocFetcher(self.cf, LogTocElement,
                                         CRTPPort.LOGGING,
                                         self.toc, self._refresh_callback,
                                         self._toc_cache)
                toc_fetcher.start()
    if (chan == CHAN_LOGDATA):
        chan = packet.channel
        id = packet.data[0]
        block = self._find_block(id)
        # 24-bit little-endian timestamp reassembled from three bytes.
        timestamps = struct.unpack('<BBB', packet.data[1:4])
        timestamp = (
            timestamps[0] | timestamps[1] << 8 | timestamps[2] << 16)
        logdata = packet.data[4:]
        if (block is not None):
            block.unpack_log_data(logdata, timestamp)
        else:
            logger.warning('Error no LogEntry to handle id=%d', id) |
def gen_jid(opts=None):
'''
Generate a jid
'''
if opts is None:
salt.utils.versions.warn_until(
'Sodium',
'The `opts` argument was not passed into salt.utils.jid.gen_jid(). '
'This will be required starting in {version}.'
)
opts = {}
global LAST_JID_DATETIME # pylint: disable=global-statement
if opts.get('utc_jid', False):
jid_dt = datetime.datetime.utcnow()
else:
jid_dt = datetime.datetime.now()
if not opts.get('unique_jid', False):
return '{0:%Y%m%d%H%M%S%f}'.format(jid_dt)
if LAST_JID_DATETIME and LAST_JID_DATETIME >= jid_dt:
jid_dt = LAST_JID_DATETIME + datetime.timedelta(microseconds=1)
LAST_JID_DATETIME = jid_dt
return '{0:%Y%m%d%H%M%S%f}_{1}'.format(jid_dt, os.getpid()) | Generate a jid | Below is the the instruction that describes the task:
### Input:
Generate a jid
### Response:
def gen_jid(opts=None):
    '''
    Generate a jid
    '''
    global LAST_JID_DATETIME  # pylint: disable=global-statement
    if opts is None:
        salt.utils.versions.warn_until(
            'Sodium',
            'The `opts` argument was not passed into salt.utils.jid.gen_jid(). '
            'This will be required starting in {version}.'
        )
        opts = {}
    # UTC vs. local time is an opt-in configuration choice.
    stamp = datetime.datetime.utcnow() if opts.get('utc_jid', False) \
        else datetime.datetime.now()
    if not opts.get('unique_jid', False):
        return '{0:%Y%m%d%H%M%S%f}'.format(stamp)
    # Guarantee strictly increasing jids: nudge the timestamp forward one
    # microsecond whenever the clock has not advanced past the last jid.
    if LAST_JID_DATETIME and LAST_JID_DATETIME >= stamp:
        stamp = LAST_JID_DATETIME + datetime.timedelta(microseconds=1)
    LAST_JID_DATETIME = stamp
    return '{0:%Y%m%d%H%M%S%f}_{1}'.format(stamp, os.getpid())
def inject(self, other):
"""Add two compatible `Series` along their shared x-axis values.
Parameters
----------
other : `Series`
a `Series` whose xindex intersects with `self.xindex`
Returns
-------
out : `Series`
the sum of `self` and `other` along their shared x-axis values
Raises
------
ValueError
if `self` and `other` have incompatible units or xindex intervals
Notes
-----
If `other.xindex` and `self.xindex` do not intersect, this method will
return a copy of `self`. If the series have uniformly offset indices,
this method will raise a warning.
If `self.xindex` is an array of timestamps, and if `other.xspan` is
not a subset of `self.xspan`, then `other` will be cropped before
being adding to `self`.
Users who wish to taper or window their `Series` should do so before
passing it to this method. See :meth:`TimeSeries.taper` and
:func:`~gwpy.signal.window.planck` for more information.
"""
# check Series compatibility
self.is_compatible(other)
if (self.xunit == second) and (other.xspan[0] < self.xspan[0]):
other = other.crop(start=self.xspan[0])
if (self.xunit == second) and (other.xspan[1] > self.xspan[1]):
other = other.crop(end=self.xspan[1])
ox0 = other.x0.to(self.x0.unit)
idx = ((ox0 - self.x0) / self.dx).value
if not idx.is_integer():
warn('Series have overlapping xspan but their x-axis values are '
'uniformly offset. Returning a copy of the original Series.')
return self.copy()
# add the Series along their shared samples
slice_ = slice(int(idx), int(idx) + other.size)
out = self.copy()
out.value[slice_] += other.value
return out | Add two compatible `Series` along their shared x-axis values.
Parameters
----------
other : `Series`
a `Series` whose xindex intersects with `self.xindex`
Returns
-------
out : `Series`
the sum of `self` and `other` along their shared x-axis values
Raises
------
ValueError
if `self` and `other` have incompatible units or xindex intervals
Notes
-----
If `other.xindex` and `self.xindex` do not intersect, this method will
return a copy of `self`. If the series have uniformly offset indices,
this method will raise a warning.
If `self.xindex` is an array of timestamps, and if `other.xspan` is
not a subset of `self.xspan`, then `other` will be cropped before
being adding to `self`.
Users who wish to taper or window their `Series` should do so before
passing it to this method. See :meth:`TimeSeries.taper` and
:func:`~gwpy.signal.window.planck` for more information. | Below is the the instruction that describes the task:
### Input:
Add two compatible `Series` along their shared x-axis values.
Parameters
----------
other : `Series`
a `Series` whose xindex intersects with `self.xindex`
Returns
-------
out : `Series`
the sum of `self` and `other` along their shared x-axis values
Raises
------
ValueError
if `self` and `other` have incompatible units or xindex intervals
Notes
-----
If `other.xindex` and `self.xindex` do not intersect, this method will
return a copy of `self`. If the series have uniformly offset indices,
this method will raise a warning.
If `self.xindex` is an array of timestamps, and if `other.xspan` is
not a subset of `self.xspan`, then `other` will be cropped before
being adding to `self`.
Users who wish to taper or window their `Series` should do so before
passing it to this method. See :meth:`TimeSeries.taper` and
:func:`~gwpy.signal.window.planck` for more information.
### Response:
def inject(self, other):
    """Add two compatible `Series` along their shared x-axis values.
    Parameters
    ----------
    other : `Series`
        a `Series` whose xindex intersects with `self.xindex`
    Returns
    -------
    out : `Series`
        the sum of `self` and `other` along their shared x-axis values
    Raises
    ------
    ValueError
        if `self` and `other` have incompatible units or xindex intervals
    Notes
    -----
    If `other.xindex` and `self.xindex` do not intersect, this method will
    return a copy of `self`. If the series have uniformly offset indices,
    this method will raise a warning.
    If `self.xindex` is an array of timestamps, and if `other.xspan` is
    not a subset of `self.xspan`, then `other` will be cropped before
    being adding to `self`.
    Users who wish to taper or window their `Series` should do so before
    passing it to this method. See :meth:`TimeSeries.taper` and
    :func:`~gwpy.signal.window.planck` for more information.
    """
    # check Series compatibility
    self.is_compatible(other)
    # For time-indexed series (x-axis in seconds), crop `other` so it
    # never extends beyond this series' span on either side.
    if (self.xunit == second) and (other.xspan[0] < self.xspan[0]):
        other = other.crop(start=self.xspan[0])
    if (self.xunit == second) and (other.xspan[1] > self.xspan[1]):
        other = other.crop(end=self.xspan[1])
    # Offset of other's first sample from self's first sample, measured
    # in samples; a non-integer offset means the grids are misaligned.
    ox0 = other.x0.to(self.x0.unit)
    idx = ((ox0 - self.x0) / self.dx).value
    if not idx.is_integer():
        warn('Series have overlapping xspan but their x-axis values are '
             'uniformly offset. Returning a copy of the original Series.')
        return self.copy()
    # add the Series along their shared samples
    slice_ = slice(int(idx), int(idx) + other.size)
    out = self.copy()
    out.value[slice_] += other.value
    return out |
def _read_from_paths():
"""
Try to read data from configuration paths ($HOME/_SETTINGS_PATH,
/etc/_SETTINGS_PATH).
"""
home = os.environ.get("HOME", "/")
home_path = os.path.join(home, _SETTINGS_PATH)
etc_path = os.path.join("/etc", _SETTINGS_PATH)
read_path = None
if home and os.path.exists(home_path):
read_path = home_path
elif os.path.exists(etc_path):
read_path = etc_path
if read_path:
with open(read_path) as f:
_substitute_globals(
json.loads(f.read())
) | Try to read data from configuration paths ($HOME/_SETTINGS_PATH,
/etc/_SETTINGS_PATH). | Below is the the instruction that describes the task:
### Input:
Try to read data from configuration paths ($HOME/_SETTINGS_PATH,
/etc/_SETTINGS_PATH).
### Response:
def _read_from_paths():
    """
    Try to read data from configuration paths ($HOME/_SETTINGS_PATH,
    /etc/_SETTINGS_PATH).
    """
    home = os.environ.get("HOME", "/")
    home_path = os.path.join(home, _SETTINGS_PATH)
    etc_path = os.path.join("/etc", _SETTINGS_PATH)
    # Prefer the per-user file; fall back to the system-wide one.
    if home and os.path.exists(home_path):
        chosen = home_path
    elif os.path.exists(etc_path):
        chosen = etc_path
    else:
        chosen = None
    if chosen is None:
        return
    with open(chosen) as handle:
        _substitute_globals(json.loads(handle.read()))
def format_time_small(seconds):
"""
Same as format_time() but always uses SI-prefix and 3 significant figures.
"""
if not isinstance(seconds, (int, float)):
return str(seconds)
if math.isnan(seconds):
return "-"
if abs(seconds)<1:
milliseconds = 1000*seconds
if abs(milliseconds)<1:
microseconds = 1000*milliseconds
if abs(microseconds)<1:
nanoseconds = 1000*microseconds
if abs(nanoseconds)<0.5:
return "0"
else:
return "{:.0f}ns".format(nanoseconds)
elif abs(microseconds)<10:
return "{:.2f}us".format(microseconds)
elif abs(microseconds)<100:
return "{:.1f}us".format(microseconds)
else:
return "{:.0f}us".format(microseconds)
elif abs(milliseconds)<10:
return "{:.2f}ms".format(milliseconds)
elif abs(milliseconds)<100:
return "{:.1f}ms".format(milliseconds)
else:
return "{:.0f}ms".format(milliseconds)
elif abs(seconds)<10:
return "{:.2f}s".format(seconds)
elif abs(seconds)<100:
return "{:.1f}s".format(seconds)
else:
return "{:.0f}s".format(seconds) | Same as format_time() but always uses SI-prefix and 3 significant figures. | Below is the the instruction that describes the task:
### Input:
Same as format_time() but always uses SI-prefix and 3 significant figures.
### Response:
def format_time_small(seconds):
    """
    Same as format_time() but always uses SI-prefix and 3 significant figures.

    Non-numeric input is returned via str(); NaN renders as "-"; values
    below half a nanosecond render as "0".
    """
    if not isinstance(seconds, (int, float)):
        return str(seconds)
    if math.isnan(seconds):
        return "-"
    magnitude = abs(seconds)
    # Each rung keeps 3 significant figures by varying the precision.
    if magnitude >= 1:
        if magnitude < 10:
            return "{:.2f}s".format(seconds)
        if magnitude < 100:
            return "{:.1f}s".format(seconds)
        return "{:.0f}s".format(seconds)
    milliseconds = 1000 * seconds
    if abs(milliseconds) >= 1:
        if abs(milliseconds) < 10:
            return "{:.2f}ms".format(milliseconds)
        if abs(milliseconds) < 100:
            return "{:.1f}ms".format(milliseconds)
        return "{:.0f}ms".format(milliseconds)
    microseconds = 1000 * milliseconds
    if abs(microseconds) >= 1:
        if abs(microseconds) < 10:
            return "{:.2f}us".format(microseconds)
        if abs(microseconds) < 100:
            return "{:.1f}us".format(microseconds)
        return "{:.0f}us".format(microseconds)
    nanoseconds = 1000 * microseconds
    if abs(nanoseconds) < 0.5:
        return "0"
    return "{:.0f}ns".format(nanoseconds)
def symbol(self):
"""Gets the symbol under the current cursor."""
if self._symbol is None:
self._symbol = self._symbol_extract(cache.RE_CURSOR)
return self._symbol | Gets the symbol under the current cursor. | Below is the the instruction that describes the task:
### Input:
Gets the symbol under the current cursor.
### Response:
def symbol(self):
    """Gets the symbol under the current cursor (computed lazily, then cached)."""
    cached = self._symbol
    if cached is None:
        cached = self._symbol_extract(cache.RE_CURSOR)
        self._symbol = cached
    return cached
def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
node_ids = []
center_lat = None
center_lon = None
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "nd":
ref_id = sub_child.attrib.get("ref")
if ref_id is None:
raise ValueError("Unable to find required ref value.")
ref_id = int(ref_id)
node_ids.append(ref_id)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
way_id = child.attrib.get("id")
if way_id is not None:
way_id = int(way_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon,
attributes=attributes, node_ids=node_ids, tags=tags, result=result) | Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name | Below is the the instruction that describes the task:
### Input:
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
### Response:
def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
node_ids = []
center_lat = None
center_lon = None
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "nd":
ref_id = sub_child.attrib.get("ref")
if ref_id is None:
raise ValueError("Unable to find required ref value.")
ref_id = int(ref_id)
node_ids.append(ref_id)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
way_id = child.attrib.get("id")
if way_id is not None:
way_id = int(way_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon,
attributes=attributes, node_ids=node_ids, tags=tags, result=result) |
async def grant_model(self, username, model_uuid, acl='read'):
"""Grant a user access to a model. Note that if the user
already has higher permissions than the provided ACL,
this will do nothing (see revoke_model for a way to remove
permissions).
:param str username: Username
:param str model_uuid: The UUID of the model to change.
:param str acl: Access control ('read, 'write' or 'admin')
"""
model_facade = client.ModelManagerFacade.from_connection(
self.connection())
user = tag.user(username)
model = tag.model(model_uuid)
changes = client.ModifyModelAccess(acl, 'grant', model, user)
return await model_facade.ModifyModelAccess([changes]) | Grant a user access to a model. Note that if the user
already has higher permissions than the provided ACL,
this will do nothing (see revoke_model for a way to remove
permissions).
:param str username: Username
:param str model_uuid: The UUID of the model to change.
:param str acl: Access control ('read, 'write' or 'admin') | Below is the the instruction that describes the task:
### Input:
Grant a user access to a model. Note that if the user
already has higher permissions than the provided ACL,
this will do nothing (see revoke_model for a way to remove
permissions).
:param str username: Username
:param str model_uuid: The UUID of the model to change.
:param str acl: Access control ('read, 'write' or 'admin')
### Response:
async def grant_model(self, username, model_uuid, acl='read'):
"""Grant a user access to a model. Note that if the user
already has higher permissions than the provided ACL,
this will do nothing (see revoke_model for a way to remove
permissions).
:param str username: Username
:param str model_uuid: The UUID of the model to change.
:param str acl: Access control ('read, 'write' or 'admin')
"""
model_facade = client.ModelManagerFacade.from_connection(
self.connection())
user = tag.user(username)
model = tag.model(model_uuid)
changes = client.ModifyModelAccess(acl, 'grant', model, user)
return await model_facade.ModifyModelAccess([changes]) |
def extract_table_identifiers(token_stream):
"""yields tuples of (schema_name, table_name, table_alias)"""
for item in token_stream:
if isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
# Sometimes Keywords (such as FROM ) are classified as
# identifiers which don't have the get_real_name() method.
try:
schema_name = identifier.get_parent_name()
real_name = identifier.get_real_name()
except AttributeError:
continue
if real_name:
yield (schema_name, real_name, identifier.get_alias())
elif isinstance(item, Identifier):
real_name = item.get_real_name()
schema_name = item.get_parent_name()
if real_name:
yield (schema_name, real_name, item.get_alias())
else:
name = item.get_name()
yield (None, name, item.get_alias() or name)
elif isinstance(item, Function):
yield (None, item.get_name(), item.get_name()) | yields tuples of (schema_name, table_name, table_alias) | Below is the the instruction that describes the task:
### Input:
yields tuples of (schema_name, table_name, table_alias)
### Response:
def extract_table_identifiers(token_stream):
"""yields tuples of (schema_name, table_name, table_alias)"""
for item in token_stream:
if isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
# Sometimes Keywords (such as FROM ) are classified as
# identifiers which don't have the get_real_name() method.
try:
schema_name = identifier.get_parent_name()
real_name = identifier.get_real_name()
except AttributeError:
continue
if real_name:
yield (schema_name, real_name, identifier.get_alias())
elif isinstance(item, Identifier):
real_name = item.get_real_name()
schema_name = item.get_parent_name()
if real_name:
yield (schema_name, real_name, item.get_alias())
else:
name = item.get_name()
yield (None, name, item.get_alias() or name)
elif isinstance(item, Function):
yield (None, item.get_name(), item.get_name()) |
def scale_out(self, blocks=1):
"""Scales out the number of active workers by 1.
This method is notImplemented for threads and will raise the error if called.
Parameters:
blocks : int
Number of blocks to be provisioned.
"""
r = []
for i in range(blocks):
if self.provider:
block = self.provider.submit(self.launch_cmd, 1, self.workers_per_node)
logger.debug("Launched block {}:{}".format(i, block))
if not block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
self.engines.extend([block])
r.extend([block])
else:
logger.error("No execution provider available")
r = None
return r | Scales out the number of active workers by 1.
This method is notImplemented for threads and will raise the error if called.
Parameters:
blocks : int
Number of blocks to be provisioned. | Below is the the instruction that describes the task:
### Input:
Scales out the number of active workers by 1.
This method is notImplemented for threads and will raise the error if called.
Parameters:
blocks : int
Number of blocks to be provisioned.
### Response:
def scale_out(self, blocks=1):
"""Scales out the number of active workers by 1.
This method is notImplemented for threads and will raise the error if called.
Parameters:
blocks : int
Number of blocks to be provisioned.
"""
r = []
for i in range(blocks):
if self.provider:
block = self.provider.submit(self.launch_cmd, 1, self.workers_per_node)
logger.debug("Launched block {}:{}".format(i, block))
if not block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
self.engines.extend([block])
r.extend([block])
else:
logger.error("No execution provider available")
r = None
return r |
def merge_lines(top, bot, icod="top"):
"""
Merges two lines (top and bot) in the way that the overlapping make senses.
Args:
top (str): the top line
bot (str): the bottom line
icod (top or bot): in case of doubt, which line should have priority? Default: "top".
Returns:
str: The merge of both lines.
"""
ret = ""
for topc, botc in zip(top, bot):
if topc == botc:
ret += topc
elif topc in '┼╪' and botc == " ":
ret += "│"
elif topc == " ":
ret += botc
elif topc in '┬╥' and botc in " ║│" and icod == "top":
ret += topc
elif topc in '┬' and botc == " " and icod == "bot":
ret += '│'
elif topc in '╥' and botc == " " and icod == "bot":
ret += '║'
elif topc in '┬│' and botc == "═":
ret += '╪'
elif topc in '┬│' and botc == "─":
ret += '┼'
elif topc in '└┘║│░' and botc == " " and icod == "top":
ret += topc
elif topc in '─═' and botc == " " and icod == "top":
ret += topc
elif topc in '─═' and botc == " " and icod == "bot":
ret += botc
elif topc in "║╥" and botc in "═":
ret += "╬"
elif topc in "║╥" and botc in "─":
ret += "╫"
elif topc in '╫╬' and botc in " ":
ret += "║"
elif topc == '└' and botc == "┌":
ret += "├"
elif topc == '┘' and botc == "┐":
ret += "┤"
elif botc in "┐┌" and icod == 'top':
ret += "┬"
elif topc in "┘└" and botc in "─" and icod == 'top':
ret += "┴"
else:
ret += botc
return ret | Merges two lines (top and bot) in the way that the overlapping make senses.
Args:
top (str): the top line
bot (str): the bottom line
icod (top or bot): in case of doubt, which line should have priority? Default: "top".
Returns:
str: The merge of both lines. | Below is the the instruction that describes the task:
### Input:
Merges two lines (top and bot) in the way that the overlapping make senses.
Args:
top (str): the top line
bot (str): the bottom line
icod (top or bot): in case of doubt, which line should have priority? Default: "top".
Returns:
str: The merge of both lines.
### Response:
def merge_lines(top, bot, icod="top"):
"""
Merges two lines (top and bot) in the way that the overlapping make senses.
Args:
top (str): the top line
bot (str): the bottom line
icod (top or bot): in case of doubt, which line should have priority? Default: "top".
Returns:
str: The merge of both lines.
"""
ret = ""
for topc, botc in zip(top, bot):
if topc == botc:
ret += topc
elif topc in '┼╪' and botc == " ":
ret += "│"
elif topc == " ":
ret += botc
elif topc in '┬╥' and botc in " ║│" and icod == "top":
ret += topc
elif topc in '┬' and botc == " " and icod == "bot":
ret += '│'
elif topc in '╥' and botc == " " and icod == "bot":
ret += '║'
elif topc in '┬│' and botc == "═":
ret += '╪'
elif topc in '┬│' and botc == "─":
ret += '┼'
elif topc in '└┘║│░' and botc == " " and icod == "top":
ret += topc
elif topc in '─═' and botc == " " and icod == "top":
ret += topc
elif topc in '─═' and botc == " " and icod == "bot":
ret += botc
elif topc in "║╥" and botc in "═":
ret += "╬"
elif topc in "║╥" and botc in "─":
ret += "╫"
elif topc in '╫╬' and botc in " ":
ret += "║"
elif topc == '└' and botc == "┌":
ret += "├"
elif topc == '┘' and botc == "┐":
ret += "┤"
elif botc in "┐┌" and icod == 'top':
ret += "┬"
elif topc in "┘└" and botc in "─" and icod == 'top':
ret += "┴"
else:
ret += botc
return ret |
def _combine_w0614(self, messages):
"""
For the "unused import from wildcard import" messages,
we want to combine all warnings about the same line into
a single message.
"""
by_loc = defaultdict(list)
out = []
for message in messages:
if message.code == 'unused-wildcard-import':
by_loc[message.location].append(message)
else:
out.append(message)
for location, message_list in by_loc.items():
names = []
for msg in message_list:
names.append(
_UNUSED_WILDCARD_IMPORT_RE.match(msg.message).group(1))
msgtxt = 'Unused imports from wildcard import: %s' % ', '.join(
names)
combined_message = Message('pylint', 'unused-wildcard-import',
location, msgtxt)
out.append(combined_message)
return out | For the "unused import from wildcard import" messages,
we want to combine all warnings about the same line into
a single message. | Below is the the instruction that describes the task:
### Input:
For the "unused import from wildcard import" messages,
we want to combine all warnings about the same line into
a single message.
### Response:
def _combine_w0614(self, messages):
"""
For the "unused import from wildcard import" messages,
we want to combine all warnings about the same line into
a single message.
"""
by_loc = defaultdict(list)
out = []
for message in messages:
if message.code == 'unused-wildcard-import':
by_loc[message.location].append(message)
else:
out.append(message)
for location, message_list in by_loc.items():
names = []
for msg in message_list:
names.append(
_UNUSED_WILDCARD_IMPORT_RE.match(msg.message).group(1))
msgtxt = 'Unused imports from wildcard import: %s' % ', '.join(
names)
combined_message = Message('pylint', 'unused-wildcard-import',
location, msgtxt)
out.append(combined_message)
return out |
def remove_repositories(repositories, default_repositories):
"""
Remove no default repositories
"""
repos = []
for repo in repositories:
if repo in default_repositories:
repos.append(repo)
return repos | Remove no default repositories | Below is the the instruction that describes the task:
### Input:
Remove no default repositories
### Response:
def remove_repositories(repositories, default_repositories):
"""
Remove no default repositories
"""
repos = []
for repo in repositories:
if repo in default_repositories:
repos.append(repo)
return repos |
def matrix_multiplication_blockwise(self, matrix, blocksize):
"""
http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication
"""
#Create the blockwise version of self and matrix
selfBlockwise = self.matrix_to_blockmatrix(blocksize)
matrixBlockwise = matrix.matrix_to_blockmatrix(blocksize)
return (selfBlockwise * matrixBlockwise).flatten() | http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication | Below is the the instruction that describes the task:
### Input:
http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication
### Response:
def matrix_multiplication_blockwise(self, matrix, blocksize):
"""
http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication
"""
#Create the blockwise version of self and matrix
selfBlockwise = self.matrix_to_blockmatrix(blocksize)
matrixBlockwise = matrix.matrix_to_blockmatrix(blocksize)
return (selfBlockwise * matrixBlockwise).flatten() |
def analyze(self):
"""Run complete anaylysis and return results."""
self.calculate_cache_access()
self.calculate_cycles()
self.results['flops per iteration'] = sum(self.kernel._flops.values())
return self.results | Run complete anaylysis and return results. | Below is the the instruction that describes the task:
### Input:
Run complete anaylysis and return results.
### Response:
def analyze(self):
"""Run complete anaylysis and return results."""
self.calculate_cache_access()
self.calculate_cycles()
self.results['flops per iteration'] = sum(self.kernel._flops.values())
return self.results |
def random_like(ary=None, shape=None, dtype=None):
"""
Returns a random array of the same shape and type as the
supplied array argument, or the supplied shape and dtype
"""
if ary is not None:
shape, dtype = ary.shape, ary.dtype
elif shape is None or dtype is None:
raise ValueError((
'random_like(ary, shape, dtype) must be supplied '
'with either an array argument, or the shape and dtype '
'of the desired random array.'))
if np.issubdtype(dtype, np.complexfloating):
return (np.random.random(size=shape) + \
np.random.random(size=shape)*1j).astype(dtype)
else:
return np.random.random(size=shape).astype(dtype) | Returns a random array of the same shape and type as the
supplied array argument, or the supplied shape and dtype | Below is the the instruction that describes the task:
### Input:
Returns a random array of the same shape and type as the
supplied array argument, or the supplied shape and dtype
### Response:
def random_like(ary=None, shape=None, dtype=None):
"""
Returns a random array of the same shape and type as the
supplied array argument, or the supplied shape and dtype
"""
if ary is not None:
shape, dtype = ary.shape, ary.dtype
elif shape is None or dtype is None:
raise ValueError((
'random_like(ary, shape, dtype) must be supplied '
'with either an array argument, or the shape and dtype '
'of the desired random array.'))
if np.issubdtype(dtype, np.complexfloating):
return (np.random.random(size=shape) + \
np.random.random(size=shape)*1j).astype(dtype)
else:
return np.random.random(size=shape).astype(dtype) |
def dummy_func(arg1, arg2, arg3=None, arg4=[1, 2, 3], arg5={}, **kwargs):
"""
test func for kwargs parseing
"""
foo = kwargs.get('foo', None)
bar = kwargs.pop('bar', 4)
foo2 = kwargs['foo2']
foobar = str(foo) + str(bar) + str(foo2)
return foobar | test func for kwargs parseing | Below is the the instruction that describes the task:
### Input:
test func for kwargs parseing
### Response:
def dummy_func(arg1, arg2, arg3=None, arg4=[1, 2, 3], arg5={}, **kwargs):
"""
test func for kwargs parseing
"""
foo = kwargs.get('foo', None)
bar = kwargs.pop('bar', 4)
foo2 = kwargs['foo2']
foobar = str(foo) + str(bar) + str(foo2)
return foobar |
def match_function_id(self, function_id, match):
"""Matches the function identified by the given ``Id``.
arg: function_id (osid.id.Id): the Id of the ``Function``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``function_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('functionId', str(function_id), bool(match)) | Matches the function identified by the given ``Id``.
arg: function_id (osid.id.Id): the Id of the ``Function``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``function_id`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Matches the function identified by the given ``Id``.
arg: function_id (osid.id.Id): the Id of the ``Function``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``function_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def match_function_id(self, function_id, match):
"""Matches the function identified by the given ``Id``.
arg: function_id (osid.id.Id): the Id of the ``Function``
arg: match (boolean): ``true`` if a positive match, ``false``
for a negative match
raise: NullArgument - ``function_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('functionId', str(function_id), bool(match)) |
def build_annotation_dict_any_filter(annotations: Mapping[str, Iterable[str]]) -> EdgePredicate:
"""Build an edge predicate that passes for edges whose data dictionaries match the given dictionary.
If the given dictionary is empty, will always evaluate to true.
:param annotations: The annotation query dict to match
"""
if not annotations:
return keep_edge_permissive
@edge_predicate
def annotation_dict_any_filter(edge_data: EdgeData) -> bool:
"""Check if the any of the annotations in the enclosed query match."""
return _annotation_dict_any_filter(edge_data, query=annotations)
return annotation_dict_any_filter | Build an edge predicate that passes for edges whose data dictionaries match the given dictionary.
If the given dictionary is empty, will always evaluate to true.
:param annotations: The annotation query dict to match | Below is the the instruction that describes the task:
### Input:
Build an edge predicate that passes for edges whose data dictionaries match the given dictionary.
If the given dictionary is empty, will always evaluate to true.
:param annotations: The annotation query dict to match
### Response:
def build_annotation_dict_any_filter(annotations: Mapping[str, Iterable[str]]) -> EdgePredicate:
"""Build an edge predicate that passes for edges whose data dictionaries match the given dictionary.
If the given dictionary is empty, will always evaluate to true.
:param annotations: The annotation query dict to match
"""
if not annotations:
return keep_edge_permissive
@edge_predicate
def annotation_dict_any_filter(edge_data: EdgeData) -> bool:
"""Check if the any of the annotations in the enclosed query match."""
return _annotation_dict_any_filter(edge_data, query=annotations)
return annotation_dict_any_filter |
def unlink_parameter(self, param):
"""
:param param: param object to remove from being a parameter of this parameterized object.
"""
if not param in self.parameters:
try:
raise HierarchyError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
except AttributeError:
raise HierarchyError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
start = sum([p.size for p in self.parameters[:param._parent_index_]])
self.size -= param.size
del self.parameters[param._parent_index_]
self._remove_parameter_name(param)
param._disconnect_parent()
param.remove_observer(self, self._pass_through_notify_observers)
for name, iop in self._index_operations.items():
iop.shift_left(start, param.size)
self._connect_parameters()
self._notify_parent_change()
parent = self._parent_
while parent is not None:
parent.size -= param.size
parent = parent._parent_
self._highest_parent_._connect_parameters()
self._highest_parent_._connect_fixes()
self._highest_parent_._notify_parent_change() | :param param: param object to remove from being a parameter of this parameterized object. | Below is the the instruction that describes the task:
### Input:
:param param: param object to remove from being a parameter of this parameterized object.
### Response:
def unlink_parameter(self, param):
"""
:param param: param object to remove from being a parameter of this parameterized object.
"""
if not param in self.parameters:
try:
raise HierarchyError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
except AttributeError:
raise HierarchyError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
start = sum([p.size for p in self.parameters[:param._parent_index_]])
self.size -= param.size
del self.parameters[param._parent_index_]
self._remove_parameter_name(param)
param._disconnect_parent()
param.remove_observer(self, self._pass_through_notify_observers)
for name, iop in self._index_operations.items():
iop.shift_left(start, param.size)
self._connect_parameters()
self._notify_parent_change()
parent = self._parent_
while parent is not None:
parent.size -= param.size
parent = parent._parent_
self._highest_parent_._connect_parameters()
self._highest_parent_._connect_fixes()
self._highest_parent_._notify_parent_change() |
def alpha_gen(x):
""" Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users.
Docstring for the generated alpha function::
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
"""
def alpha_(xmin,x=x):
"""
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
"""
gexmin = x>=xmin
n = np.count_nonzero(gexmin)
if n < 2:
return 0
x = x[gexmin]
a = 1 + float(n) / sum(log(x/xmin))
return a
return alpha_ | Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users.
Docstring for the generated alpha function::
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0 | Below is the the instruction that describes the task:
### Input:
Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users.
Docstring for the generated alpha function::
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
### Response:
def alpha_gen(x):
""" Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users.
Docstring for the generated alpha function::
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
"""
def alpha_(xmin,x=x):
"""
Given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
If there is only one element, return alpha=0
"""
gexmin = x>=xmin
n = np.count_nonzero(gexmin)
if n < 2:
return 0
x = x[gexmin]
a = 1 + float(n) / sum(log(x/xmin))
return a
return alpha_ |
def marginal_counts(counts, meas_qubits):
"""
Compute the marginal counts for a subset of measured qubits.
Args:
counts (dict): the counts returned from a backend ({str: int}).
meas_qubits (list[int]): the qubits to return the marginal
counts distribution for.
Returns:
dict: A counts dict for the meas_qubits.abs
Example: if `counts = {'00': 10, '01': 5}`
`marginal_counts(counts, [0])` returns `{'0': 15, '1': 0}`.
`marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.
"""
# pylint: disable=cell-var-from-loop
# Extract total number of qubits from count keys
num_of_qubits = len(list(counts.keys())[0])
# keys for measured qubits only
qs = sorted(meas_qubits, reverse=True)
meas_keys = count_keys(len(qs))
# get regex match strings for summing outcomes of other qubits
rgx = [
reduce(lambda x, y: (key[qs.index(y)] if y in qs else '\\d') + x,
range(num_of_qubits), '') for key in meas_keys
]
# build the return list
meas_counts = []
for m in rgx:
c = 0
for key, val in counts.items():
if match(m, key):
c += val
meas_counts.append(c)
# return as counts dict on measured qubits only
return dict(zip(meas_keys, meas_counts)) | Compute the marginal counts for a subset of measured qubits.
Args:
counts (dict): the counts returned from a backend ({str: int}).
meas_qubits (list[int]): the qubits to return the marginal
counts distribution for.
Returns:
dict: A counts dict for the meas_qubits.abs
Example: if `counts = {'00': 10, '01': 5}`
`marginal_counts(counts, [0])` returns `{'0': 15, '1': 0}`.
`marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`. | Below is the the instruction that describes the task:
### Input:
Compute the marginal counts for a subset of measured qubits.
Args:
counts (dict): the counts returned from a backend ({str: int}).
meas_qubits (list[int]): the qubits to return the marginal
counts distribution for.
Returns:
dict: A counts dict for the meas_qubits.abs
Example: if `counts = {'00': 10, '01': 5}`
`marginal_counts(counts, [0])` returns `{'0': 15, '1': 0}`.
`marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.
### Response:
def marginal_counts(counts, meas_qubits):
"""
Compute the marginal counts for a subset of measured qubits.
Args:
counts (dict): the counts returned from a backend ({str: int}).
meas_qubits (list[int]): the qubits to return the marginal
counts distribution for.
Returns:
dict: A counts dict for the meas_qubits.abs
Example: if `counts = {'00': 10, '01': 5}`
`marginal_counts(counts, [0])` returns `{'0': 15, '1': 0}`.
`marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.
"""
# pylint: disable=cell-var-from-loop
# Extract total number of qubits from count keys
num_of_qubits = len(list(counts.keys())[0])
# keys for measured qubits only
qs = sorted(meas_qubits, reverse=True)
meas_keys = count_keys(len(qs))
# get regex match strings for summing outcomes of other qubits
rgx = [
reduce(lambda x, y: (key[qs.index(y)] if y in qs else '\\d') + x,
range(num_of_qubits), '') for key in meas_keys
]
# build the return list
meas_counts = []
for m in rgx:
c = 0
for key, val in counts.items():
if match(m, key):
c += val
meas_counts.append(c)
# return as counts dict on measured qubits only
return dict(zip(meas_keys, meas_counts)) |
def options(self, session=None):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
cursor = dbo.list_collections(
session=session, filter={"name": self.__name})
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options | Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter. | Below is the the instruction that describes the task:
### Input:
Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
### Response:
def options(self, session=None):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
cursor = dbo.list_collections(
session=session, filter={"name": self.__name})
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options |
def match(self):
"""
Used to get exploitable result of re.search
:return: The data of the match status.
:rtype: mixed
"""
# We initate this variable which gonna contain the returned data
result = []
# We compile the regex string
to_match = comp(self.regex)
# In case we have to use the implementation of ${BASH_REMATCH} we use
# re.findall otherwise, we use re.search
if self.rematch: # pylint: disable=no-member
pre_result = to_match.findall(self.data)
else:
pre_result = to_match.search(self.data)
if self.return_data and pre_result: # pylint: disable=no-member
if self.rematch: # pylint: disable=no-member
for data in pre_result:
if isinstance(data, tuple):
result.extend(list(data))
else:
result.append(data)
if self.group != 0: # pylint: disable=no-member
return result[self.group] # pylint: disable=no-member
else:
result = pre_result.group(
self.group # pylint: disable=no-member
).strip()
return result
if not self.return_data and pre_result: # pylint: disable=no-member
return True
return False | Used to get exploitable result of re.search
:return: The data of the match status.
:rtype: mixed | Below is the the instruction that describes the task:
### Input:
Used to get exploitable result of re.search
:return: The data of the match status.
:rtype: mixed
### Response:
def match(self):
    """
    Used to get exploitable result of re.search

    :return: The data of the match status.
    :rtype: mixed
    """
    compiled = comp(self.regex)

    # ${BASH_REMATCH}-style matching collects every occurrence via
    # findall(); otherwise a single search() is performed.
    if self.rematch:  # pylint: disable=no-member
        found = compiled.findall(self.data)
    else:
        found = compiled.search(self.data)

    if not found:
        # Nothing matched: always report failure.
        return False

    if not self.return_data:  # pylint: disable=no-member
        # Caller only wants a boolean match status.
        return True

    if not self.rematch:  # pylint: disable=no-member
        # Single-search mode: hand back the requested group, stripped.
        return found.group(self.group).strip()  # pylint: disable=no-member

    # findall() mode: flatten tuples produced by multiple capture groups.
    flattened = []
    for element in found:
        if isinstance(element, tuple):
            flattened.extend(list(element))
        else:
            flattened.append(element)

    if self.group != 0:  # pylint: disable=no-member
        return flattened[self.group]  # pylint: disable=no-member
    return flattened
def editMeta(self, title=None, description=None):
"""Set metadata for photo. (flickr.photos.setMeta)"""
method = 'flickr.photosets.editMeta'
if title is None:
title = self.title
if description is None:
description = self.description
_dopost(method, auth=True, title=title, \
description=description, photoset_id=self.id)
self.__title = title
self.__description = description
return True | Set metadata for photo. (flickr.photos.setMeta) | Below is the the instruction that describes the task:
### Input:
Set metadata for photo. (flickr.photos.setMeta)
### Response:
def editMeta(self, title=None, description=None):
    """Set metadata for photo. (flickr.photos.setMeta)

    Omitted arguments fall back to the current title/description, so the
    call never blanks out an existing value unintentionally.
    """
    api_method = 'flickr.photosets.editMeta'
    new_title = self.title if title is None else title
    new_description = self.description if description is None else description
    _dopost(api_method, auth=True, title=new_title,
            description=new_description, photoset_id=self.id)
    # Mirror the remote change locally.
    self.__title = new_title
    self.__description = new_description
    return True
def get(self, block=True, timeout=None):
"""get."""
try:
item = super().get(block, timeout)
self._getsocket.recv(1)
return item
except queue.Empty:
raise queue.Empty | get. | Below is the the instruction that describes the task:
### Input:
get.
### Response:
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.

    Behaves like ``queue.Queue.get`` but additionally reads one byte
    from ``self._getsocket`` after a successful dequeue — presumably to
    keep the socket's readable state in sync with the number of queued
    items (NOTE(review): confirm against the matching ``put`` side).

    :param block: when True, wait until an item is available.
    :param timeout: maximum number of seconds to wait when blocking.
    :raises queue.Empty: if no item is available in time.
    """
    try:
        item = super().get(block, timeout)
        self._getsocket.recv(1)
        return item
    except queue.Empty:
        # Bug fix: the old code did ``raise queue.Empty``, which raised a
        # brand-new instance and discarded the original traceback; a bare
        # ``raise`` re-raises the caught exception unchanged.
        raise
def get_long_task_info(self, long_task_id, expand=None, callback=None):
"""
Returns information about a long-running task.
:param long_task_id (string): The key of the task to be returned.
:param expand (string): A comma separated list of properties to expand on the task. Default: Empty
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the longtask/{id} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
"""
params = {}
if expand:
params["expand"] = expand
return self._service_get_request("rest/api/longtask/{id}".format(id=long_task_id), params=params,
callback=callback) | Returns information about a long-running task.
:param long_task_id (string): The key of the task to be returned.
:param expand (string): A comma separated list of properties to expand on the task. Default: Empty
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the longtask/{id} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially. | Below is the the instruction that describes the task:
### Input:
Returns information about a long-running task.
:param long_task_id (string): The key of the task to be returned.
:param expand (string): A comma separated list of properties to expand on the task. Default: Empty
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the longtask/{id} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
### Response:
def get_long_task_info(self, long_task_id, expand=None, callback=None):
    """
    Returns information about a long-running task.

    :param long_task_id (string): The key of the task to be returned.
    :param expand (string): A comma separated list of properties to expand on the task. Default: Empty
    :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
                     Default: None (no callback, raw data returned).
    :return: The JSON data returned from the longtask/{id} endpoint,
             or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
    """
    # Only send the expand parameter when the caller actually set it.
    query_params = {"expand": expand} if expand else {}
    endpoint = "rest/api/longtask/{id}".format(id=long_task_id)
    return self._service_get_request(endpoint, params=query_params, callback=callback)
def env_string(name, required=False, default=empty):
"""Pulls an environment variable out of the environment returning it as a
string. If not present in the environment and no default is specified, an
empty string is returned.
:param name: The name of the environment variable be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
"""
value = get_env_value(name, default=default, required=required)
if value is empty:
value = ''
return value | Pulls an environment variable out of the environment returning it as a
string. If not present in the environment and no default is specified, an
empty string is returned.
:param name: The name of the environment variable be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool | Below is the the instruction that describes the task:
### Input:
Pulls an environment variable out of the environment returning it as a
string. If not present in the environment and no default is specified, an
empty string is returned.
:param name: The name of the environment variable be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
### Response:
def env_string(name, required=False, default=empty):
    """Fetch an environment variable and return it as a string.

    If the variable is absent from the environment and no default was
    supplied, an empty string is returned.

    :param name: The name of the environment variable be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will raise
        a ``ValueError``)
    :type default: bool
    """
    result = get_env_value(name, default=default, required=required)
    # ``empty`` is the module's "not provided" sentinel, not a falsy value.
    return '' if result is empty else result
def method(self, method):
"""Sets the method of this Notificant.
The notification method used for notification target. # noqa: E501
:param method: The method of this Notificant. # noqa: E501
:type: str
"""
if method is None:
raise ValueError("Invalid value for `method`, must not be `None`") # noqa: E501
allowed_values = ["WEBHOOK", "EMAIL", "PAGERDUTY"] # noqa: E501
if method not in allowed_values:
raise ValueError(
"Invalid value for `method` ({0}), must be one of {1}" # noqa: E501
.format(method, allowed_values)
)
self._method = method | Sets the method of this Notificant.
The notification method used for notification target. # noqa: E501
:param method: The method of this Notificant. # noqa: E501
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the method of this Notificant.
The notification method used for notification target. # noqa: E501
:param method: The method of this Notificant. # noqa: E501
:type: str
### Response:
def method(self, method):
    """Sets the method of this Notificant.

    The notification method used for notification target.  # noqa: E501

    :param method: The method of this Notificant.  # noqa: E501
    :type: str
    """
    if method is None:
        raise ValueError("Invalid value for `method`, must not be `None`")  # noqa: E501
    permitted = ["WEBHOOK", "EMAIL", "PAGERDUTY"]  # noqa: E501
    if method in permitted:
        self._method = method
        return
    raise ValueError(
        "Invalid value for `method` ({0}), must be one of {1}"  # noqa: E501
        .format(method, permitted)
    )
def warn(self, cmd, desc=''):
''' Style for warning message. '''
return self._label_desc(cmd, desc, self.warn_color) | Style for warning message. | Below is the the instruction that describes the task:
### Input:
Style for warning message.
### Response:
def warn(self, cmd, desc=''):
        ''' Style for warning message.

        :param cmd: the label/command text to render.
        :param desc: optional description shown alongside the label.
        :return: whatever ``_label_desc`` produces, rendered with
            ``self.warn_color``.
        '''
        return self._label_desc(cmd, desc, self.warn_color)
def _execute_trade_cmd(
self, trade_cmd, users, expire_seconds, entrust_prop, send_interval
):
"""分发交易指令到对应的 user 并执行
:param trade_cmd:
:param users:
:param expire_seconds:
:param entrust_prop:
:param send_interval:
:return:
"""
for user in users:
# check expire
now = datetime.datetime.now()
expire = (now - trade_cmd["datetime"]).total_seconds()
if expire > expire_seconds:
log.warning(
"策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格: %s)超时,指令产生时间: %s 当前时间: %s, 超过设置的最大过期时间 %s 秒, 被丢弃",
trade_cmd["strategy_name"],
trade_cmd["stock_code"],
trade_cmd["action"],
trade_cmd["amount"],
trade_cmd["price"],
trade_cmd["datetime"],
now,
expire_seconds,
)
break
# check price
price = trade_cmd["price"]
if not self._is_number(price) or price <= 0:
log.warning(
"策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格: %s)超时,指令产生时间: %s 当前时间: %s, 价格无效 , 被丢弃",
trade_cmd["strategy_name"],
trade_cmd["stock_code"],
trade_cmd["action"],
trade_cmd["amount"],
trade_cmd["price"],
trade_cmd["datetime"],
now,
)
break
# check amount
if trade_cmd["amount"] <= 0:
log.warning(
"策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格: %s)超时,指令产生时间: %s 当前时间: %s, 买入股数无效 , 被丢弃",
trade_cmd["strategy_name"],
trade_cmd["stock_code"],
trade_cmd["action"],
trade_cmd["amount"],
trade_cmd["price"],
trade_cmd["datetime"],
now,
)
break
actual_price = self._calculate_price_by_slippage(
trade_cmd["action"], trade_cmd["price"]
)
args = {
"security": trade_cmd["stock_code"],
"price": actual_price,
"amount": trade_cmd["amount"],
"entrust_prop": entrust_prop,
}
try:
response = getattr(user, trade_cmd["action"])(**args)
except exceptions.TradeError as e:
trader_name = type(user).__name__
err_msg = "{}: {}".format(type(e).__name__, e.args)
log.error(
"%s 执行 策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格(考虑滑点): %s 指令产生时间: %s) 失败, 错误信息: %s",
trader_name,
trade_cmd["strategy_name"],
trade_cmd["stock_code"],
trade_cmd["action"],
trade_cmd["amount"],
actual_price,
trade_cmd["datetime"],
err_msg,
)
else:
log.info(
"策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格(考虑滑点): %s 指令产生时间: %s) 执行成功, 返回: %s",
trade_cmd["strategy_name"],
trade_cmd["stock_code"],
trade_cmd["action"],
trade_cmd["amount"],
actual_price,
trade_cmd["datetime"],
response,
) | 分发交易指令到对应的 user 并执行
:param trade_cmd:
:param users:
:param expire_seconds:
:param entrust_prop:
:param send_interval:
:return: | Below is the the instruction that describes the task:
### Input:
分发交易指令到对应的 user 并执行
:param trade_cmd:
:param users:
:param expire_seconds:
:param entrust_prop:
:param send_interval:
:return:
### Response:
def _execute_trade_cmd(
        self, trade_cmd, users, expire_seconds, entrust_prop, send_interval
    ):
        """Dispatch a trade command to every user account and execute it.

        (Original docstring, in Chinese: 分发交易指令到对应的 user 并执行)

        :param trade_cmd: dict describing the order; keys read below are
            ``strategy_name``, ``stock_code``, ``action`` (name of the
            trader method to invoke, e.g. ``buy``/``sell``), ``amount``,
            ``price`` and ``datetime`` (signal creation time).
        :param users: iterable of trader objects exposing the method named
            by ``trade_cmd['action']``.
        :param expire_seconds: commands older than this many seconds are
            dropped instead of executed.
        :param entrust_prop: entrust type forwarded to the trader call.
        :param send_interval: accepted for interface compatibility; not
            referenced in this method body.
        :return: None -- outcomes are reported through logging only.
        """
        for user in users:
            # check expire: a stale command is dropped for *all* remaining
            # users (note the ``break``, not ``continue``).
            now = datetime.datetime.now()
            expire = (now - trade_cmd["datetime"]).total_seconds()
            if expire > expire_seconds:
                log.warning(
                    "策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格: %s)超时,指令产生时间: %s 当前时间: %s, 超过设置的最大过期时间 %s 秒, 被丢弃",
                    trade_cmd["strategy_name"],
                    trade_cmd["stock_code"],
                    trade_cmd["action"],
                    trade_cmd["amount"],
                    trade_cmd["price"],
                    trade_cmd["datetime"],
                    now,
                    expire_seconds,
                )
                break
            # check price: non-numeric or non-positive prices abort the
            # command.  NOTE(review): the log template reuses the expiry
            # wording (超时) although the actual reason is an invalid price.
            price = trade_cmd["price"]
            if not self._is_number(price) or price <= 0:
                log.warning(
                    "策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格: %s)超时,指令产生时间: %s 当前时间: %s, 价格无效 , 被丢弃",
                    trade_cmd["strategy_name"],
                    trade_cmd["stock_code"],
                    trade_cmd["action"],
                    trade_cmd["amount"],
                    trade_cmd["price"],
                    trade_cmd["datetime"],
                    now,
                )
                break
            # check amount: a non-positive share count likewise aborts.
            if trade_cmd["amount"] <= 0:
                log.warning(
                    "策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格: %s)超时,指令产生时间: %s 当前时间: %s, 买入股数无效 , 被丢弃",
                    trade_cmd["strategy_name"],
                    trade_cmd["stock_code"],
                    trade_cmd["action"],
                    trade_cmd["amount"],
                    trade_cmd["price"],
                    trade_cmd["datetime"],
                    now,
                )
                break
            # Apply configured slippage to the signal price before submitting.
            actual_price = self._calculate_price_by_slippage(
                trade_cmd["action"], trade_cmd["price"]
            )
            args = {
                "security": trade_cmd["stock_code"],
                "price": actual_price,
                "amount": trade_cmd["amount"],
                "entrust_prop": entrust_prop,
            }
            try:
                # ``action`` selects the trader method dynamically,
                # e.g. user.buy(**args) / user.sell(**args).
                response = getattr(user, trade_cmd["action"])(**args)
            except exceptions.TradeError as e:
                trader_name = type(user).__name__
                err_msg = "{}: {}".format(type(e).__name__, e.args)
                log.error(
                    "%s 执行 策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格(考虑滑点): %s 指令产生时间: %s) 失败, 错误信息: %s",
                    trader_name,
                    trade_cmd["strategy_name"],
                    trade_cmd["stock_code"],
                    trade_cmd["action"],
                    trade_cmd["amount"],
                    actual_price,
                    trade_cmd["datetime"],
                    err_msg,
                )
            else:
                log.info(
                    "策略 [%s] 指令(股票: %s 动作: %s 数量: %s 价格(考虑滑点): %s 指令产生时间: %s) 执行成功, 返回: %s",
                    trade_cmd["strategy_name"],
                    trade_cmd["stock_code"],
                    trade_cmd["action"],
                    trade_cmd["amount"],
                    actual_price,
                    trade_cmd["datetime"],
                    response,
                )
def as_frame(self):
""" :return: Multi-Index DataFrame """
sids, frames = self.response_map.keys(), self.response_map.values()
frame = pd.concat(frames, keys=sids, axis=1)
return frame | :return: Multi-Index DataFrame | Below is the the instruction that describes the task:
### Input:
:return: Multi-Index DataFrame
### Response:
def as_frame(self):
    """Concatenate the per-security frames into one wide DataFrame.

    :return: Multi-Index DataFrame whose first column level is the
        security id taken from ``self.response_map``.
    """
    mapping = self.response_map
    return pd.concat(list(mapping.values()), keys=list(mapping.keys()), axis=1)
def end_of(self, event_id, import_options=True):
"""
Set Date-Picker as the end-date of a date-range.
Args:
- event_id (string): User-defined unique id for linking two fields
- import_options (bool): inherit options from start-date input,
default: TRUE
"""
event_id = str(event_id)
if event_id in DatePickerDictionary.items:
linked_picker = DatePickerDictionary.items[event_id]
self.config['linked_to'] = linked_picker.config['id']
if import_options:
backup_moment_format = self.config['options']['format']
self.config['options'].update(linked_picker.config['options'])
self.config['options'].update(self.options_param)
if self.format_param or 'format' in self.options_param:
self.config['options']['format'] = backup_moment_format
else:
self.format = linked_picker.format
# Setting useCurrent is necessary, see following issue
# https://github.com/Eonasdan/bootstrap-datetimepicker/issues/1075
self.config['options']['useCurrent'] = False
self._link_to(linked_picker)
else:
raise KeyError(
'start-date not specified for event_id "%s"' % event_id)
return self | Set Date-Picker as the end-date of a date-range.
Args:
- event_id (string): User-defined unique id for linking two fields
- import_options (bool): inherit options from start-date input,
default: TRUE | Below is the the instruction that describes the task:
### Input:
Set Date-Picker as the end-date of a date-range.
Args:
- event_id (string): User-defined unique id for linking two fields
- import_options (bool): inherit options from start-date input,
default: TRUE
### Response:
def end_of(self, event_id, import_options=True):
    """
    Set Date-Picker as the end-date of a date-range.

    Args:
        - event_id (string): User-defined unique id for linking two fields
        - import_options (bool): inherit options from start-date input,
                                 default: TRUE
    """
    key = str(event_id)
    if key not in DatePickerDictionary.items:
        raise KeyError(
            'start-date not specified for event_id "%s"' % key)
    start_picker = DatePickerDictionary.items[key]
    self.config['linked_to'] = start_picker.config['id']
    if import_options:
        # Remember our own format before the start picker's options
        # overwrite it, then restore it if the caller set one explicitly.
        saved_format = self.config['options']['format']
        self.config['options'].update(start_picker.config['options'])
        self.config['options'].update(self.options_param)
        if self.format_param or 'format' in self.options_param:
            self.config['options']['format'] = saved_format
        else:
            self.format = start_picker.format
    # Setting useCurrent is necessary, see following issue
    # https://github.com/Eonasdan/bootstrap-datetimepicker/issues/1075
    self.config['options']['useCurrent'] = False
    self._link_to(start_picker)
    return self
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
"""Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset pluse train, test, and development splits.
"""
infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tf.logging.info('Loading examples')
all_examples = []
for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')):
if i % 100000 == 0:
tf.logging.info('%d examples have been loaded....' % i)
ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}
all_examples.append(ex)
random.seed(1)
random.shuffle(all_examples)
n_train = int(len(all_examples) * prop_train)
n_val = n_train + int(len(all_examples) * prop_val)
train = all_examples[:n_train]
val = all_examples[n_train:n_val]
test = []
for e in all_examples[n_val:]:
if e['n_intervening'] == e['n_diff_intervening']:
test.append(e)
return all_examples, train, val, test | Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset pluse train, test, and development splits. | Below is the the instruction that describes the task:
### Input:
Loads exampls from the tsv file.
Args:
tmp_dir: temp directory.
prop_train: proportion of the train data
prop_val: proportion of the validation data
Returns:
All examples in the dataset pluse train, test, and development splits.
### Response:
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
  """Loads examples from the tsv file.

  Args:
    tmp_dir: temp directory.
    prop_train: proportion of the train data
    prop_val: proportion of the validation data

  Returns:
    All examples in the dataset plus train, validation, and test splits.
  """
  infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
  tf.logging.info('Loading examples')

  all_examples = []
  # Bug fix: the gzip handle was previously leaked (never closed).
  fileobj = gzip.open(infile)
  try:
    for i, d in enumerate(csv.DictReader(fileobj, delimiter='\t')):
      if i % 100000 == 0:
        tf.logging.info('%d examples have been loaded....' % i)
      # Convert purely-numeric fields to int, leave the rest as strings.
      ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}
      all_examples.append(ex)
  finally:
    fileobj.close()

  # Fixed seed so the splits are reproducible across runs.
  random.seed(1)
  random.shuffle(all_examples)
  n_train = int(len(all_examples) * prop_train)
  n_val = n_train + int(len(all_examples) * prop_val)
  train = all_examples[:n_train]
  val = all_examples[n_train:n_val]

  # The test split keeps only examples where every intervening noun
  # differs in number from the subject.
  test = []
  for e in all_examples[n_val:]:
    if e['n_intervening'] == e['n_diff_intervening']:
      test.append(e)
  return all_examples, train, val, test
def QA_fetch_stock_min(code, start, end, format='numpy', frequence='1min', collections=DATABASE.stock_min):
'获取股票分钟线'
if frequence in ['1min', '1m']:
frequence = '1min'
elif frequence in ['5min', '5m']:
frequence = '5min'
elif frequence in ['15min', '15m']:
frequence = '15min'
elif frequence in ['30min', '30m']:
frequence = '30min'
elif frequence in ['60min', '60m']:
frequence = '60min'
else:
print("QA Error QA_fetch_stock_min parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)
__data = []
# code checking
code = QA_util_code_tolist(code)
cursor = collections.find({
'code': {'$in': code}, "time_stamp": {
"$gte": QA_util_time_stamp(start),
"$lte": QA_util_time_stamp(end)
}, 'type': frequence
}, {"_id": 0}, batch_size=10000)
res = pd.DataFrame([item for item in cursor])
try:
res = res.assign(volume=res.vol, datetime=pd.to_datetime(
res.datetime)).query('volume>1').drop_duplicates(['datetime', 'code']).set_index('datetime', drop=False)
# return res
except:
res = None
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
# 多种数据格式
elif format in ['n', 'N', 'numpy']:
return numpy.asarray(res)
elif format in ['list', 'l', 'L']:
return numpy.asarray(res).tolist()
else:
print("QA Error QA_fetch_stock_min format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None | 获取股票分钟线 | Below is the the instruction that describes the task:
### Input:
获取股票分钟线
### Response:
def QA_fetch_stock_min(code, start, end, format='numpy', frequence='1min', collections=DATABASE.stock_min):
    '''Fetch stock minute-bar data (获取股票分钟线).

    :param code: stock code or list of codes (normalized via
        ``QA_util_code_tolist``).
    :param start: start datetime string accepted by ``QA_util_time_stamp``.
    :param end: end datetime string accepted by ``QA_util_time_stamp``.
    :param format: output container -- pandas DataFrame ('P'/'p'/'pandas'/'pd'),
        dicts ('json'/'dict'), numpy array ('n'/'N'/'numpy') or list
        ('list'/'l'/'L').
    :param frequence: bar frequency, 1/5/15/30/60 minutes in either
        '1min' or '1m' spelling.
    :param collections: MongoDB collection to query.
    :return: the matching bars in the requested format; None when the
        format flag is unknown, or (pandas case) when no usable rows exist.
    '''
    # Normalize the frequency spelling; an unknown value is only reported,
    # the raw value is still used in the query (original behavior kept).
    if frequence in ['1min', '1m']:
        frequence = '1min'
    elif frequence in ['5min', '5m']:
        frequence = '5min'
    elif frequence in ['15min', '15m']:
        frequence = '15min'
    elif frequence in ['30min', '30m']:
        frequence = '30min'
    elif frequence in ['60min', '60m']:
        frequence = '60min'
    else:
        print("QA Error QA_fetch_stock_min parameter frequence=%s is none of 1min 1m 5min 5m 15min 15m 30min 30m 60min 60m" % frequence)

    # code checking
    code = QA_util_code_tolist(code)

    cursor = collections.find({
        'code': {'$in': code}, "time_stamp": {
            "$gte": QA_util_time_stamp(start),
            "$lte": QA_util_time_stamp(end)
        }, 'type': frequence
    }, {"_id": 0}, batch_size=10000)

    res = pd.DataFrame([item for item in cursor])
    try:
        # Keep rows with real volume, drop duplicate (datetime, code)
        # pairs, and index by datetime.
        res = res.assign(volume=res.vol, datetime=pd.to_datetime(
            res.datetime)).query('volume>1').drop_duplicates(['datetime', 'code']).set_index('datetime', drop=False)
    except Exception:
        # An empty query result lacks the 'vol'/'datetime' columns; treat
        # any shaping failure as "no data".  (Was a bare `except:`, which
        # also swallowed KeyboardInterrupt/SystemExit.)
        res = None

    if format in ['P', 'p', 'pandas', 'pd']:
        return res
    elif format in ['json', 'dict']:
        return QA_util_to_json_from_pandas(res)
    # 多种数据格式 -- remaining container formats
    elif format in ['n', 'N', 'numpy']:
        return numpy.asarray(res)
    elif format in ['list', 'l', 'L']:
        return numpy.asarray(res).tolist()
    else:
        print("QA Error QA_fetch_stock_min format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
        return None
def encoding_and_executable(notebook, metadata, ext):
"""Return encoding and executable lines for a notebook, if applicable"""
lines = []
comment = _SCRIPT_EXTENSIONS.get(ext, {}).get('comment')
jupytext_metadata = metadata.get('jupytext', {})
if ext not in ['.Rmd', '.md'] and 'executable' in jupytext_metadata:
lines.append(comment + '!' + jupytext_metadata.pop('executable'))
if 'encoding' in jupytext_metadata:
lines.append(jupytext_metadata.pop('encoding'))
elif ext not in ['.Rmd', '.md']:
for cell in notebook.cells:
try:
cell.source.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
lines.append(comment + _UTF8_HEADER)
break
return lines | Return encoding and executable lines for a notebook, if applicable | Below is the the instruction that describes the task:
### Input:
Return encoding and executable lines for a notebook, if applicable
### Response:
def encoding_and_executable(notebook, metadata, ext):
    """Return encoding and executable lines for a notebook, if applicable"""
    header = []
    comment = _SCRIPT_EXTENSIONS.get(ext, {}).get('comment')
    jupytext_metadata = metadata.get('jupytext', {})
    is_markdown = ext in ['.Rmd', '.md']

    # Shebang line only applies to script formats, never to markdown.
    if not is_markdown and 'executable' in jupytext_metadata:
        header.append(comment + '!' + jupytext_metadata.pop('executable'))

    if 'encoding' in jupytext_metadata:
        header.append(jupytext_metadata.pop('encoding'))
    elif not is_markdown:
        # No explicit encoding: emit the UTF-8 header as soon as any cell
        # contains non-ASCII source.
        for cell in notebook.cells:
            try:
                cell.source.encode('ascii')
            except (UnicodeEncodeError, UnicodeDecodeError):
                header.append(comment + _UTF8_HEADER)
                break

    return header
def utf8(s):
"""
Coerce an object to bytes if it is Unicode.
"""
if isinstance(s, mitogen.core.UnicodeType):
s = s.encode('utf-8')
return s | Coerce an object to bytes if it is Unicode. | Below is the the instruction that describes the task:
### Input:
Coerce an object to bytes if it is Unicode.
### Response:
def utf8(s):
    """
    Coerce an object to bytes if it is Unicode.
    """
    # Non-text objects pass through untouched.
    if not isinstance(s, mitogen.core.UnicodeType):
        return s
    return s.encode('utf-8')
def _save_npz(self):
'''
Saves all of the de-trending information to disk in an `npz` file
'''
# Save the data
d = dict(self.__dict__)
d.pop('_weights', None)
d.pop('_A', None)
d.pop('_B', None)
d.pop('_f', None)
d.pop('_mK', None)
d.pop('K', None)
d.pop('dvs', None)
d.pop('clobber', None)
d.pop('clobber_tpf', None)
d.pop('_mission', None)
d.pop('debug', None)
np.savez(os.path.join(self.dir, self.name + '.npz'), **d) | Saves all of the de-trending information to disk in an `npz` file | Below is the the instruction that describes the task:
### Input:
Saves all of the de-trending information to disk in an `npz` file
### Response:
def _save_npz(self):
'''
Saves all of the de-trending information to disk in an `npz` file
'''
# Save the data
d = dict(self.__dict__)
d.pop('_weights', None)
d.pop('_A', None)
d.pop('_B', None)
d.pop('_f', None)
d.pop('_mK', None)
d.pop('K', None)
d.pop('dvs', None)
d.pop('clobber', None)
d.pop('clobber_tpf', None)
d.pop('_mission', None)
d.pop('debug', None)
np.savez(os.path.join(self.dir, self.name + '.npz'), **d) |
def download_pip_based_installations(env, pip_invocation, requirements,
download_cache_folder):
"""Download requirements for pip based installation."""
if config.installation_cache_folder() is None:
raise EnvironmentSetupError("Local installation cache folder not "
"defined but required for downloading pip based installations.")
# Installation cache folder needs to be explicitly created for pip to be
# able to copy its downloaded installation files into it. The same does not
# hold for pip's download cache folder which gets created by pip on-demand.
# Seen using Python 3.4.0 & pip 1.5.4.
_create_installation_cache_folder_if_needed()
try:
pip_options = ["install", "-d", config.installation_cache_folder(),
"--exists-action=i"]
pip_options.extend(pip_download_cache_options(download_cache_folder))
# Running pip based installations on Python 2.5.
# * Python 2.5 does not come with SSL support enabled by default and
# so pip can not use SSL certified downloads from PyPI.
# * To work around this either install the
# https://pypi.python.org/pypi/ssl package or run pip using the
# '--insecure' command-line options.
# * Installing the ssl package seems ridden with problems on
# Python 2.5 so this workaround has not been tested.
if (2, 5) <= env.sys_version_info < (2, 6):
# There are some potential cases where we do not need to use
# "--insecure", e.g. if the target Python environment already has
# the 'ssl' module installed. However, detecting whether this is so
# does not seem to be worth the effort. The only way to detect
# whether secure download is supported would be to scan the target
# environment for this information, e.g. setuptools has this
# information in its pip.backwardcompat.ssl variable - if it is
# None, the necessary SSL support is not available. But then we
# would have to be careful:
# - not to run the scan if we already know this information from
# some previous scan
# - to track all actions that could have invalidated our previous
# scan results, etc.
# It just does not seem to be worth the hassle so for now - YAGNI.
pip_options.append("--insecure")
env.execute(pip_invocation + pip_options + requirements)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
raise EnvironmentSetupError("pip based download failed.") | Download requirements for pip based installation. | Below is the the instruction that describes the task:
### Input:
Download requirements for pip based installation.
### Response:
def download_pip_based_installations(env, pip_invocation, requirements,
                                     download_cache_folder):
    """Download requirements for pip based installation.

    Runs ``pip install -d <installation-cache> --exists-action=i`` in the
    target environment so the requirement archives are fetched into the
    local installation cache without actually being installed.

    :param env: target environment wrapper; must expose
        ``sys_version_info`` and ``execute()``.
    :param pip_invocation: command list used to invoke pip in ``env``.
    :param requirements: pip requirement specifiers to download.
    :param download_cache_folder: folder handed to pip as its download
        cache (via ``pip_download_cache_options``).
    :raises EnvironmentSetupError: if no installation cache folder is
        configured, or the pip run fails for any reason other than
        KeyboardInterrupt/SystemExit.
    """
    if config.installation_cache_folder() is None:
        raise EnvironmentSetupError("Local installation cache folder not "
            "defined but required for downloading pip based installations.")
    # Installation cache folder needs to be explicitly created for pip to be
    # able to copy its downloaded installation files into it. The same does not
    # hold for pip's download cache folder which gets created by pip on-demand.
    # Seen using Python 3.4.0 & pip 1.5.4.
    _create_installation_cache_folder_if_needed()
    try:
        pip_options = ["install", "-d", config.installation_cache_folder(),
            "--exists-action=i"]
        pip_options.extend(pip_download_cache_options(download_cache_folder))
        # Running pip based installations on Python 2.5.
        # * Python 2.5 does not come with SSL support enabled by default and
        #   so pip can not use SSL certified downloads from PyPI.
        # * To work around this either install the
        #   https://pypi.python.org/pypi/ssl package or run pip using the
        #   '--insecure' command-line options.
        # * Installing the ssl package seems ridden with problems on
        #   Python 2.5 so this workaround has not been tested.
        if (2, 5) <= env.sys_version_info < (2, 6):
            # There are some potential cases where we do not need to use
            # "--insecure", e.g. if the target Python environment already has
            # the 'ssl' module installed. However, detecting whether this is so
            # does not seem to be worth the effort. The only way to detect
            # whether secure download is supported would be to scan the target
            # environment for this information, e.g. setuptools has this
            # information in its pip.backwardcompat.ssl variable - if it is
            # None, the necessary SSL support is not available. But then we
            # would have to be careful:
            # - not to run the scan if we already know this information from
            #   some previous scan
            # - to track all actions that could have invalidated our previous
            #   scan results, etc.
            # It just does not seem to be worth the hassle so for now - YAGNI.
            pip_options.append("--insecure")
        env.execute(pip_invocation + pip_options + requirements)
    except (KeyboardInterrupt, SystemExit):
        # Never mask user interruption or interpreter shutdown.
        raise
    except Exception:
        raise EnvironmentSetupError("pip based download failed.")
def now (time_t = None, slug = False):
'''Gives current time as tuple (t, t_str) where
t is integer portion from time.time()
t_str string of t using DEFAULT_TIMEFORMAT
'''
if not time_t:
time_t = time ()
time_s = strftime (
DEFAULT_SLUGFORMAT if slug else DEFAULT_TIMEFORMAT,
gmtime (time_t))
return (int (time_t), time_s) | Gives current time as tuple (t, t_str) where
t is integer portion from time.time()
t_str string of t using DEFAULT_TIMEFORMAT | Below is the the instruction that describes the task:
### Input:
Gives current time as tuple (t, t_str) where
t is integer portion from time.time()
t_str string of t using DEFAULT_TIMEFORMAT
### Response:
def now(time_t=None, slug=False):
    '''Gives current time as tuple (t, t_str) where
       t is integer portion from time.time()
       t_str string of t using DEFAULT_TIMEFORMAT
       (or DEFAULT_SLUGFORMAT when ``slug`` is true).
    '''
    if time_t is None:
        # Bug fix: the old ``if not time_t`` also treated a timestamp of
        # 0 (the Unix epoch) as "missing" and silently substituted the
        # current time.  Only an omitted argument should do that.
        time_t = time()
    fmt = DEFAULT_SLUGFORMAT if slug else DEFAULT_TIMEFORMAT
    time_s = strftime(fmt, gmtime(time_t))
    return (int(time_t), time_s)
def make_published(self, request, queryset):
"""
Marks selected news items as published
"""
rows_updated = queryset.update(is_published=True)
self.message_user(request,
ungettext('%(count)d newsitem was published',
'%(count)d newsitems were published',
rows_updated) % {'count': rows_updated}) | Marks selected news items as published | Below is the the instruction that describes the task:
### Input:
Marks selected news items as published
### Response:
def make_published(self, request, queryset):
"""
Marks selected news items as published
"""
rows_updated = queryset.update(is_published=True)
self.message_user(request,
ungettext('%(count)d newsitem was published',
'%(count)d newsitems were published',
rows_updated) % {'count': rows_updated}) |
def connect(url, prefix=None, **kwargs):
    """
    connect and return a connection instance from url
    arguments:
    - url (str): xbahn connection url
    - prefix: optional prefix, normalized through get_prefix()
    - kwargs: forwarded unchanged to connection()
    """
    return connection(url, prefix=get_prefix(prefix), **kwargs) | connect and return a connection instance from url
arguments:
- url (str): xbahn connection url | Below is the the instruction that describes the task:
### Input:
connect and return a connection instance from url
arguments:
- url (str): xbahn connection url
### Response:
def connect(url, prefix=None, **kwargs):
"""
connect and return a connection instance from url
arguments:
- url (str): xbahn connection url
"""
return connection(url, prefix=get_prefix(prefix), **kwargs) |
def get_port_profile_for_vlan(self, vlan_id, device_id):
        """Returns the port profile id for the given VLAN and device.

        NOTE(review): the original summary said "Returns Vlan id", but the
        code returns ``entry.profile_id`` (or None when no row matches).
        """
        # First PortProfile row matching both vlan_id and device_id.
        entry = self.session.query(ucsm_model.PortProfile).filter_by(
            vlan_id=vlan_id, device_id=device_id).first()
        return entry.profile_id if entry else None | Returns Vlan id associated with the port profile.
### Input:
Returns Vlan id associated with the port profile.
### Response:
def get_port_profile_for_vlan(self, vlan_id, device_id):
"""Returns Vlan id associated with the port profile."""
entry = self.session.query(ucsm_model.PortProfile).filter_by(
vlan_id=vlan_id, device_id=device_id).first()
return entry.profile_id if entry else None |
def get_metric_index(self, data_source):
        """
        This function will return the elasticsearch index for a corresponding
        data source. It chooses between the default and the user-supplied
        es indices and returns the user-supplied one if it is available.
        :param data_source: the data source for which the index has to be returned
        :returns: an elasticsearch index name
        """
        # User-supplied mapping (self.index_dict) wins over the default.
        if data_source in self.index_dict:
            index = self.index_dict[data_source]
        else:
            # Default path: data source -> class -> index name.
            index = self.class2index[self.ds2class[data_source]]
        return Index(index_name=index) | This function will return the elasticsearch index for a corresponding
data source. It chooses in between the default and the user inputed
es indices and returns the user inputed one if it is available.
:param data_source: the data source for which the index has to be returned
:returns: an elasticsearch index name | Below is the the instruction that describes the task:
### Input:
This function will return the elasticsearch index for a corresponding
data source. It chooses in between the default and the user inputed
es indices and returns the user inputed one if it is available.
:param data_source: the data source for which the index has to be returned
:returns: an elasticsearch index name
### Response:
def get_metric_index(self, data_source):
"""
This function will return the elasticsearch index for a corresponding
data source. It chooses in between the default and the user inputed
es indices and returns the user inputed one if it is available.
:param data_source: the data source for which the index has to be returned
:returns: an elasticsearch index name
"""
if data_source in self.index_dict:
index = self.index_dict[data_source]
else:
index = self.class2index[self.ds2class[data_source]]
return Index(index_name=index) |
def queue_file_io_task(self, fileobj, data, offset):
        """Queue IO write for submission to the IO executor.
        This method accepts an IO executor and information about the
        downloaded data, and handles submitting this to the IO executor.
        This method may defer submission to the IO executor if necessary.
        """
        # Submit via the transfer coordinator so the write is tracked
        # together with the rest of the transfer.
        self._transfer_coordinator.submit(
            self._io_executor,
            self.get_io_write_task(fileobj, data, offset)
        ) | Queue IO write for submission to the IO executor.
This method accepts an IO executor and information about the
downloaded data, and handles submitting this to the IO executor.
This method may defer submission to the IO executor if necessary. | Below is the the instruction that describes the task:
### Input:
Queue IO write for submission to the IO executor.
This method accepts an IO executor and information about the
downloaded data, and handles submitting this to the IO executor.
This method may defer submission to the IO executor if necessary.
### Response:
def queue_file_io_task(self, fileobj, data, offset):
"""Queue IO write for submission to the IO executor.
This method accepts an IO executor and information about the
downloaded data, and handles submitting this to the IO executor.
This method may defer submission to the IO executor if necessary.
"""
self._transfer_coordinator.submit(
self._io_executor,
self.get_io_write_task(fileobj, data, offset)
) |
def get_bright(mask, image, ret_data="avg,sd"):
    """Compute avg and/or std of the event brightness
    The event brightness is defined by the gray-scale values of the
    image data within the event mask area.
    Parameters
    ----------
    mask: ndarray or list of ndarrays of shape (M,N) and dtype bool
        The mask values, True where the event is located in `image`.
    image: ndarray or list of ndarrays of shape (M,N)
        A 2D array that holds the image in form of grayscale values
        of an event.
    ret_data: str
        A comma-separated list of metrics to compute
        - "avg": compute the average
        - "sd": compute the standard deviation
        Selected metrics are returned in alphabetical order.
    Returns
    -------
    bright_avg: float or ndarray of size N
        Average image data within the contour
    bright_std: float or ndarray of size N
        Standard deviation of image data within the contour
    """
    # This method is based on a pull request by Maik Herbig.
    # Substring tests: "avg" and "sd" are looked up inside ret_data.
    ret_avg = "avg" in ret_data
    ret_std = "sd" in ret_data
    if ret_avg + ret_std == 0:
        raise ValueError("No valid metrices selected!")
    if isinstance(mask, np.ndarray) and len(mask.shape) == 2:
        # We have a single image
        image = [image]
        mask = [mask]
        ret_list = False
    else:
        ret_list = True
    length = min(len(mask), len(image))
    # Results are stored in a separate array initialized with nans
    if ret_avg:
        avg = np.zeros(length, dtype=float) * np.nan
    if ret_std:
        std = np.zeros(length, dtype=float) * np.nan
    for ii in range(length):
        imgi = image[ii]
        mski = mask[ii]
        # Assign results
        if ret_avg:
            avg[ii] = np.mean(imgi[mski])
        if ret_std:
            std[ii] = np.std(imgi[mski])
    results = []
    # Keep alphabetical order
    if ret_avg:
        results.append(avg)
    if ret_std:
        results.append(std)
    if not ret_list:
        # Only return scalars
        results = [r[0] for r in results]
    if ret_avg+ret_std == 1:
        # Only return one column
        return results[0]
    return results | Compute avg and/or std of the event brightness
The event brightness is defined by the gray-scale values of the
image data within the event mask area.
Parameters
----------
mask: ndarray or list of ndarrays of shape (M,N) and dtype bool
The mask values, True where the event is located in `image`.
image: ndarray or list of ndarrays of shape (M,N)
A 2D array that holds the image in form of grayscale values
of an event.
ret_data: str
        A comma-separated list of metrics to compute
- "avg": compute the average
- "sd": compute the standard deviation
Selected metrics are returned in alphabetical order.
Returns
-------
bright_avg: float or ndarray of size N
Average image data within the contour
bright_std: float or ndarray of size N
Standard deviation of image data within the contour | Below is the the instruction that describes the task:
### Input:
Compute avg and/or std of the event brightness
The event brightness is defined by the gray-scale values of the
image data within the event mask area.
Parameters
----------
mask: ndarray or list of ndarrays of shape (M,N) and dtype bool
The mask values, True where the event is located in `image`.
image: ndarray or list of ndarrays of shape (M,N)
A 2D array that holds the image in form of grayscale values
of an event.
ret_data: str
A comma-separated list of metrices to compute
- "avg": compute the average
- "sd": compute the standard deviation
Selected metrics are returned in alphabetical order.
Returns
-------
bright_avg: float or ndarray of size N
Average image data within the contour
bright_std: float or ndarray of size N
Standard deviation of image data within the contour
### Response:
def get_bright(mask, image, ret_data="avg,sd"):
"""Compute avg and/or std of the event brightness
The event brightness is defined by the gray-scale values of the
image data within the event mask area.
Parameters
----------
mask: ndarray or list of ndarrays of shape (M,N) and dtype bool
The mask values, True where the event is located in `image`.
image: ndarray or list of ndarrays of shape (M,N)
A 2D array that holds the image in form of grayscale values
of an event.
ret_data: str
A comma-separated list of metrices to compute
- "avg": compute the average
- "sd": compute the standard deviation
Selected metrics are returned in alphabetical order.
Returns
-------
bright_avg: float or ndarray of size N
Average image data within the contour
bright_std: float or ndarray of size N
Standard deviation of image data within the contour
"""
# This method is based on a pull request by Maik Herbig.
ret_avg = "avg" in ret_data
ret_std = "sd" in ret_data
if ret_avg + ret_std == 0:
raise ValueError("No valid metrices selected!")
if isinstance(mask, np.ndarray) and len(mask.shape) == 2:
# We have a single image
image = [image]
mask = [mask]
ret_list = False
else:
ret_list = True
length = min(len(mask), len(image))
# Results are stored in a separate array initialized with nans
if ret_avg:
avg = np.zeros(length, dtype=float) * np.nan
if ret_std:
std = np.zeros(length, dtype=float) * np.nan
for ii in range(length):
imgi = image[ii]
mski = mask[ii]
# Assign results
if ret_avg:
avg[ii] = np.mean(imgi[mski])
if ret_std:
std[ii] = np.std(imgi[mski])
results = []
# Keep alphabetical order
if ret_avg:
results.append(avg)
if ret_std:
results.append(std)
if not ret_list:
# Only return scalars
results = [r[0] for r in results]
if ret_avg+ret_std == 1:
# Only return one column
return results[0]
return results |
def get_chunk_coords(self):
        """
        Return the x,z coordinates and length of the chunks that are defined in the regionfile.
        This includes chunks which may not be readable for whatever reason.
        This method is deprecated. Use :meth:`get_metadata` instead.
        """
        chunks = []
        # Scan every slot of the 32x32 chunk grid (per the loop bounds).
        for x in range(32):
            for z in range(32):
                m = self.metadata[x,z]
                if m.is_created():
                    chunks.append({'x': x, 'z': z, 'length': m.blocklength})
        return chunks | Return the x,z coordinates and length of the chunks that are defined in te regionfile.
This includes chunks which may not be readable for whatever reason.
This method is deprecated. Use :meth:`get_metadata` instead. | Below is the the instruction that describes the task:
### Input:
Return the x,z coordinates and length of the chunks that are defined in the regionfile.
This includes chunks which may not be readable for whatever reason.
This method is deprecated. Use :meth:`get_metadata` instead.
### Response:
def get_chunk_coords(self):
"""
        Return the x,z coordinates and length of the chunks that are defined in the regionfile.
This includes chunks which may not be readable for whatever reason.
This method is deprecated. Use :meth:`get_metadata` instead.
"""
chunks = []
for x in range(32):
for z in range(32):
m = self.metadata[x,z]
if m.is_created():
chunks.append({'x': x, 'z': z, 'length': m.blocklength})
return chunks |
def _findExpressionEnd(self, block):
        """Find end of the last expression

        Walks backwards from ``block`` and returns ``(block, column)`` for
        the first block whose last column is > 0 (presumably the first
        non-empty line -- confirm against _lastColumn()); raises
        UserWarning when no such block exists.
        """
        while block.isValid():
            column = self._lastColumn(block)
            if column > 0:
                return block, column
            block = block.previous()
        raise UserWarning() | Find end of the last expression
### Input:
Find end of the last expression
### Response:
def _findExpressionEnd(self, block):
"""Find end of the last expression
"""
while block.isValid():
column = self._lastColumn(block)
if column > 0:
return block, column
block = block.previous()
raise UserWarning() |
def get_meta(self, name, meta_key=None):
        '''Get the ``content`` attribute of a meta tag ``name``.
        For example::
            head.get_meta('description')
        returns the ``content`` attribute of the meta tag with attribute
        ``name`` equal to ``description`` or ``None``.
        If a different meta key needs to be matched, it can be specified via
        the ``meta_key`` parameter::
            head.get_meta('og:title', meta_key='property')
        '''
        # Default to matching on the "name" attribute.
        meta_key = meta_key or 'name'
        # First matching child wins; falls through to implicit None.
        for child in self.meta._children:
            if isinstance(child, Html) and child.attr(meta_key) == name:
                return child.attr('content') | Get the ``content`` attribute of a meta tag ``name``.
For example::
    head.get_meta('description')
returns the ``content`` attribute of the meta tag with attribute
``name`` equal to ``description`` or ``None``.
If a different meta key needs to be matched, it can be specified via
the ``meta_key`` parameter::
head.get_meta('og:title', meta_key='property') | Below is the the instruction that describes the task:
### Input:
Get the ``content`` attribute of a meta tag ``name``.
For example::
head.get_meta('decription')
returns the ``content`` attribute of the meta tag with attribute
``name`` equal to ``description`` or ``None``.
If a different meta key needs to be matched, it can be specified via
the ``meta_key`` parameter::
head.get_meta('og:title', meta_key='property')
### Response:
def get_meta(self, name, meta_key=None):
'''Get the ``content`` attribute of a meta tag ``name``.
For example::
head.get_meta('decription')
returns the ``content`` attribute of the meta tag with attribute
``name`` equal to ``description`` or ``None``.
If a different meta key needs to be matched, it can be specified via
the ``meta_key`` parameter::
head.get_meta('og:title', meta_key='property')
'''
meta_key = meta_key or 'name'
for child in self.meta._children:
if isinstance(child, Html) and child.attr(meta_key) == name:
return child.attr('content') |
def verify_image_checksum(image_location, expected_checksum):
    """Verifies checksum (md5) of image file against the expected one.
    This method generates the checksum of the image file on the fly and
    verifies it against the expected checksum provided as argument.
    :param image_location: location of image file whose checksum is verified.
    :param expected_checksum: checksum to be checked against
    :raises: ImageRefValidationFailed, if invalid file path or
        verification fails.
    """
    try:
        # hash_file() computes the digest from the open stream
        # (md5 per the docstring -- confirm against hash_file()).
        with open(image_location, 'rb') as fd:
            actual_checksum = hash_file(fd)
    except IOError as e:
        # Missing or unreadable file is reported as a validation failure.
        raise exception.ImageRefValidationFailed(image_href=image_location,
                                                 reason=e)
    if actual_checksum != expected_checksum:
        msg = ('Error verifying image checksum. Image %(image)s failed to '
               'verify against checksum %(checksum)s. Actual checksum is: '
               '%(actual_checksum)s' %
               {'image': image_location, 'checksum': expected_checksum,
                'actual_checksum': actual_checksum})
        raise exception.ImageRefValidationFailed(image_href=image_location,
                                                 reason=msg) | Verifies checksum (md5) of image file against the expected one.
This method generates the checksum of the image file on the fly and
verifies it against the expected checksum provided as argument.
:param image_location: location of image file whose checksum is verified.
:param expected_checksum: checksum to be checked against
:raises: ImageRefValidationFailed, if invalid file path or
verification fails. | Below is the the instruction that describes the task:
### Input:
Verifies checksum (md5) of image file against the expected one.
This method generates the checksum of the image file on the fly and
verifies it against the expected checksum provided as argument.
:param image_location: location of image file whose checksum is verified.
:param expected_checksum: checksum to be checked against
:raises: ImageRefValidationFailed, if invalid file path or
verification fails.
### Response:
def verify_image_checksum(image_location, expected_checksum):
"""Verifies checksum (md5) of image file against the expected one.
This method generates the checksum of the image file on the fly and
verifies it against the expected checksum provided as argument.
:param image_location: location of image file whose checksum is verified.
:param expected_checksum: checksum to be checked against
:raises: ImageRefValidationFailed, if invalid file path or
verification fails.
"""
try:
with open(image_location, 'rb') as fd:
actual_checksum = hash_file(fd)
except IOError as e:
raise exception.ImageRefValidationFailed(image_href=image_location,
reason=e)
if actual_checksum != expected_checksum:
msg = ('Error verifying image checksum. Image %(image)s failed to '
'verify against checksum %(checksum)s. Actual checksum is: '
'%(actual_checksum)s' %
{'image': image_location, 'checksum': expected_checksum,
'actual_checksum': actual_checksum})
raise exception.ImageRefValidationFailed(image_href=image_location,
reason=msg) |
def description(self, request, id, description):
        """Updates the description of a gist
        Arguments:
        request: an initial request object
        id: the id of the gist we want to edit the description for
        description: the new description
        """
        # NOTE(review): parameter ``id`` shadows the builtin; kept for
        # interface compatibility.
        request.data = json.dumps({
            "description": description
        })
        # send() performs the request; respond with the gist's html_url.
        return self.send(request, id).json()['html_url'] | Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description | Below is the the instruction that describes the task:
### Input:
Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description
### Response:
def description(self, request, id, description):
"""Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description
"""
request.data = json.dumps({
"description": description
})
return self.send(request, id).json()['html_url'] |
def macro(name):
    """Replaces :func:`~flask_admin.model.template.macro`, adding support for using
    macros imported from another file. For example:
    .. code:: html+jinja
        {# templates/admin/column_formatters.html #}
        {% macro email(model, column) %}
            {% set address = model[column] %}
            <a href="mailto:{{ address }}">{{ address }}</a>
        {% endmacro %}
    .. code:: python
        class FooAdmin(ModelAdmin):
            column_formatters = {
                'col_name': macro('column_formatters.email')
            }
    Also required for this to work, is to add the following to the top of your
    master admin template:
    .. code:: html+jinja
        {# templates/admin/master.html #}
        {% import 'admin/column_formatters.html' as column_formatters with context %}
    """
    def wrapper(view, context, model, column):
        # Dotted name -> look the macro up on an imported template module.
        if '.' in name:
            # NOTE(review): split('.') raises ValueError on unpack for
            # names with more than one dot; split('.', 1) would be safer
            # -- confirm whether multi-dot names can occur.
            macro_import_name, macro_name = name.split('.')
            m = getattr(context.get(macro_import_name), macro_name, None)
        else:
            m = context.resolve(name)
        if not m:
            # Unresolved macro: return the falsy result unchanged.
            return m
        return m(model=model, column=column)
    return wrapper | Replaces :func:`~flask_admin.model.template.macro`, adding support for using
macros imported from another file. For example:
.. code:: html+jinja
{# templates/admin/column_formatters.html #}
{% macro email(model, column) %}
{% set address = model[column] %}
<a href="mailto:{{ address }}">{{ address }}</a>
{% endmacro %}
.. code:: python
class FooAdmin(ModelAdmin):
column_formatters = {
'col_name': macro('column_formatters.email')
}
Also required for this to work, is to add the following to the top of your
master admin template:
.. code:: html+jinja
{# templates/admin/master.html #}
{% import 'admin/column_formatters.html' as column_formatters with context %} | Below is the the instruction that describes the task:
### Input:
Replaces :func:`~flask_admin.model.template.macro`, adding support for using
macros imported from another file. For example:
.. code:: html+jinja
{# templates/admin/column_formatters.html #}
{% macro email(model, column) %}
{% set address = model[column] %}
<a href="mailto:{{ address }}">{{ address }}</a>
{% endmacro %}
.. code:: python
class FooAdmin(ModelAdmin):
column_formatters = {
'col_name': macro('column_formatters.email')
}
Also required for this to work, is to add the following to the top of your
master admin template:
.. code:: html+jinja
{# templates/admin/master.html #}
{% import 'admin/column_formatters.html' as column_formatters with context %}
### Response:
def macro(name):
"""Replaces :func:`~flask_admin.model.template.macro`, adding support for using
macros imported from another file. For example:
.. code:: html+jinja
{# templates/admin/column_formatters.html #}
{% macro email(model, column) %}
{% set address = model[column] %}
<a href="mailto:{{ address }}">{{ address }}</a>
{% endmacro %}
.. code:: python
class FooAdmin(ModelAdmin):
column_formatters = {
'col_name': macro('column_formatters.email')
}
Also required for this to work, is to add the following to the top of your
master admin template:
.. code:: html+jinja
{# templates/admin/master.html #}
{% import 'admin/column_formatters.html' as column_formatters with context %}
"""
def wrapper(view, context, model, column):
if '.' in name:
macro_import_name, macro_name = name.split('.')
m = getattr(context.get(macro_import_name), macro_name, None)
else:
m = context.resolve(name)
if not m:
return m
return m(model=model, column=column)
return wrapper |
def alias(*aliases):
    """
    Decorating a class with @alias('FOO', 'BAR', ..) allows the class to
    be referenced by each of the names provided as arguments.
    """
    def decorator(cls):
        # alias must be set in globals from caller's frame
        caller = sys._getframe(1)
        globals_dict = caller.f_globals
        # NOTE(review): loop variable ``alias`` shadows the enclosing
        # function's name inside decorator(); harmless but easy to misread.
        for alias in aliases:
            globals_dict[alias] = cls
        return cls
    return decorator | Decorating a class with @alias('FOO', 'BAR', ..) allows the class to
be referenced by each of the names provided as arguments. | Below is the the instruction that describes the task:
### Input:
Decorating a class with @alias('FOO', 'BAR', ..) allows the class to
be referenced by each of the names provided as arguments.
### Response:
def alias(*aliases):
"""
Decorating a class with @alias('FOO', 'BAR', ..) allows the class to
be referenced by each of the names provided as arguments.
"""
def decorator(cls):
# alias must be set in globals from caller's frame
caller = sys._getframe(1)
globals_dict = caller.f_globals
for alias in aliases:
globals_dict[alias] = cls
return cls
return decorator |
def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None,
                              execute_from_dir=None, all_genes=False, print_exec=False, **kwargs):
        """Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
        Args:
            itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
            itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
            runtype: How you will be running I-TASSER - local, slurm, or torque
            create_in_dir (str): Local directory where folders will be created, if not provided default is the
                GEM-PRO's ``data_dir``
            execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
                models to another location such as a supercomputer for running
            all_genes (bool): If all genes should be prepped, or only those without any mapped structures
            print_exec (bool): If the execution statement should be printed to run modelling
        Todo:
            * Document kwargs - extra options for I-TASSER, SLURM or Torque execution
            * Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
        """
        # Resolve the output directory: explicit argument wins, otherwise
        # a 'homology_models' subfolder of self.data_dir.
        if not create_in_dir:
            if not self.data_dir:
                raise ValueError('Output directory must be specified')
            self.homology_models_dir = op.join(self.data_dir, 'homology_models')
        else:
            self.homology_models_dir = create_in_dir
        ssbio.utils.make_dir(self.homology_models_dir)
        if not execute_from_dir:
            execute_from_dir = self.homology_models_dir
        counter = 0
        for g in self.genes_with_a_representative_sequence:
            repstruct = g.protein.representative_structure
            # Skip genes that already have a structure unless all_genes=True.
            if repstruct and not all_genes:
                log.debug('{}: representative structure set, skipping homology modeling'.format(g.id))
                continue
            g.protein.prep_itasser_modeling(itasser_installation=itasser_installation,
                                            itlib_folder=itlib_folder, runtype=runtype,
                                            create_in_dir=self.homology_models_dir,
                                            execute_from_dir=execute_from_dir,
                                            print_exec=print_exec, **kwargs)
            counter += 1
        log.info('Prepared I-TASSER modeling folders for {} genes in folder {}'.format(counter,
                                                                                       self.homology_models_dir)) | Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created, if not provided default is the
GEM-PRO's ``data_dir``
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp? | Below is the the instruction that describes the task:
### Input:
Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created, if not provided default is the
GEM-PRO's ``data_dir``
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
### Response:
def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None,
execute_from_dir=None, all_genes=False, print_exec=False, **kwargs):
"""Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created, if not provided default is the
GEM-PRO's ``data_dir``
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
"""
if not create_in_dir:
if not self.data_dir:
raise ValueError('Output directory must be specified')
self.homology_models_dir = op.join(self.data_dir, 'homology_models')
else:
self.homology_models_dir = create_in_dir
ssbio.utils.make_dir(self.homology_models_dir)
if not execute_from_dir:
execute_from_dir = self.homology_models_dir
counter = 0
for g in self.genes_with_a_representative_sequence:
repstruct = g.protein.representative_structure
if repstruct and not all_genes:
log.debug('{}: representative structure set, skipping homology modeling'.format(g.id))
continue
g.protein.prep_itasser_modeling(itasser_installation=itasser_installation,
itlib_folder=itlib_folder, runtype=runtype,
create_in_dir=self.homology_models_dir,
execute_from_dir=execute_from_dir,
print_exec=print_exec, **kwargs)
counter += 1
log.info('Prepared I-TASSER modeling folders for {} genes in folder {}'.format(counter,
self.homology_models_dir)) |
def delete_gene(self, *gene_ids):
        """Delete one or more gene ids from the list."""
        # Rebuild self.gene_ids without the given ids (rebinds, not in-place).
        self.gene_ids = [gene_id for gene_id in self.gene_ids
                         if gene_id not in gene_ids] | Delete one or more gene ids form the list.
### Input:
Delete one or more gene ids from the list.
### Response:
def delete_gene(self, *gene_ids):
"""Delete one or more gene ids form the list."""
self.gene_ids = [gene_id for gene_id in self.gene_ids
if gene_id not in gene_ids] |
def C_ISA_1932_nozzle(D, Do, rho, mu, m):
    r'''Calculates the coefficient of discharge of an ISA 1932 style nozzle
    used for measuring flow rate of fluid, based on the geometry of the nozzle,
    mass flow rate through the nozzle, and the density and viscosity of the
    fluid.
    .. math::
        C = 0.9900 - 0.2262\beta^{4.1} - (0.00175\beta^2 - 0.0033\beta^{4.15})
        \left(\frac{10^6}{Re_D}\right)^{1.15}
    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of nozzle orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the nozzle, [kg/s]
    Returns
    -------
    C : float
        Coefficient of discharge of the nozzle orifice, [-]
    Notes
    -----
    Examples
    --------
    >>> C_ISA_1932_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
    0.9635849973250495
    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    # Mean velocity from the mass flow through the pipe cross-section.
    A_pipe = pi/4.*D*D
    v = m/(A_pipe*rho)
    # Pipe Reynolds number and diameter ratio used by the correlation.
    Re_D = rho*v*D/mu
    beta = Do/D
    # Discharge-coefficient correlation from the docstring equation.
    C = (0.9900 - 0.2262*beta**4.1
         - (0.00175*beta**2 - 0.0033*beta**4.15)*(1E6/Re_D)**1.15)
    return C | r'''Calculates the coefficient of discharge of an ISA 1932 style nozzle
used for measuring flow rate of fluid, based on the geometry of the nozzle,
mass flow rate through the nozzle, and the density and viscosity of the
fluid.
.. math::
C = 0.9900 - 0.2262\beta^{4.1} - (0.00175\beta^2 - 0.0033\beta^{4.15})
\left(\frac{10^6}{Re_D}\right)^{1.15}
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of nozzle orifice at flow conditions, [m]
rho : float
Density of fluid at `P1`, [kg/m^3]
mu : float
Viscosity of fluid at `P1`, [Pa*s]
m : float
Mass flow rate of fluid through the nozzle, [kg/s]
Returns
-------
C : float
Coefficient of discharge of the nozzle orifice, [-]
Notes
-----
Examples
--------
>>> C_ISA_1932_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
0.9635849973250495
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 3: Nozzles and Venturi Nozzles. | Below is the the instruction that describes the task:
### Input:
r'''Calculates the coefficient of discharge of an ISA 1932 style nozzle
used for measuring flow rate of fluid, based on the geometry of the nozzle,
mass flow rate through the nozzle, and the density and viscosity of the
fluid.
.. math::
C = 0.9900 - 0.2262\beta^{4.1} - (0.00175\beta^2 - 0.0033\beta^{4.15})
\left(\frac{10^6}{Re_D}\right)^{1.15}
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of nozzle orifice at flow conditions, [m]
rho : float
Density of fluid at `P1`, [kg/m^3]
mu : float
Viscosity of fluid at `P1`, [Pa*s]
m : float
Mass flow rate of fluid through the nozzle, [kg/s]
Returns
-------
C : float
Coefficient of discharge of the nozzle orifice, [-]
Notes
-----
Examples
--------
>>> C_ISA_1932_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
0.9635849973250495
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 3: Nozzles and Venturi Nozzles.
### Response:
def C_ISA_1932_nozzle(D, Do, rho, mu, m):
r'''Calculates the coefficient of discharge of an ISA 1932 style nozzle
used for measuring flow rate of fluid, based on the geometry of the nozzle,
mass flow rate through the nozzle, and the density and viscosity of the
fluid.
.. math::
C = 0.9900 - 0.2262\beta^{4.1} - (0.00175\beta^2 - 0.0033\beta^{4.15})
\left(\frac{10^6}{Re_D}\right)^{1.15}
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of nozzle orifice at flow conditions, [m]
rho : float
Density of fluid at `P1`, [kg/m^3]
mu : float
Viscosity of fluid at `P1`, [Pa*s]
m : float
Mass flow rate of fluid through the nozzle, [kg/s]
Returns
-------
C : float
Coefficient of discharge of the nozzle orifice, [-]
Notes
-----
Examples
--------
>>> C_ISA_1932_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
0.9635849973250495
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 3: Nozzles and Venturi Nozzles.
'''
A_pipe = pi/4.*D*D
v = m/(A_pipe*rho)
Re_D = rho*v*D/mu
beta = Do/D
C = (0.9900 - 0.2262*beta**4.1
- (0.00175*beta**2 - 0.0033*beta**4.15)*(1E6/Re_D)**1.15)
return C |
def fcoe_fcoe_fabric_map_fcoe_fcf_map_fcf_map_fcf_rbid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe")
fcoe_fabric_map = ET.SubElement(fcoe, "fcoe-fabric-map")
fcoe_fabric_map_name_key = ET.SubElement(fcoe_fabric_map, "fcoe-fabric-map-name")
fcoe_fabric_map_name_key.text = kwargs.pop('fcoe_fabric_map_name')
fcoe_fcf_map = ET.SubElement(fcoe_fabric_map, "fcoe-fcf-map")
fcf_map_name_key = ET.SubElement(fcoe_fcf_map, "fcf-map-name")
fcf_map_name_key.text = kwargs.pop('fcf_map_name')
fcf_map_fcf_rbid = ET.SubElement(fcoe_fcf_map, "fcf-map-fcf-rbid")
fcf_map_fcf_rbid.text = kwargs.pop('fcf_map_fcf_rbid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcoe_fcoe_fabric_map_fcoe_fcf_map_fcf_map_fcf_rbid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe")
fcoe_fabric_map = ET.SubElement(fcoe, "fcoe-fabric-map")
fcoe_fabric_map_name_key = ET.SubElement(fcoe_fabric_map, "fcoe-fabric-map-name")
fcoe_fabric_map_name_key.text = kwargs.pop('fcoe_fabric_map_name')
fcoe_fcf_map = ET.SubElement(fcoe_fabric_map, "fcoe-fcf-map")
fcf_map_name_key = ET.SubElement(fcoe_fcf_map, "fcf-map-name")
fcf_map_name_key.text = kwargs.pop('fcf_map_name')
fcf_map_fcf_rbid = ET.SubElement(fcoe_fcf_map, "fcf-map-fcf-rbid")
fcf_map_fcf_rbid.text = kwargs.pop('fcf_map_fcf_rbid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _get_recursive_state(widget, store=None, drop_defaults=False):
"""Gets the embed state of a widget, and all other widgets it refers to as well"""
if store is None:
store = dict()
state = widget._get_embed_state(drop_defaults=drop_defaults)
store[widget.model_id] = state
# Loop over all values included in state (i.e. don't consider excluded values):
for ref in _find_widget_refs_by_state(widget, state['state']):
if ref.model_id not in store:
_get_recursive_state(ref, store, drop_defaults=drop_defaults)
return store | Gets the embed state of a widget, and all other widgets it refers to as well | Below is the the instruction that describes the task:
### Input:
Gets the embed state of a widget, and all other widgets it refers to as well
### Response:
def _get_recursive_state(widget, store=None, drop_defaults=False):
"""Gets the embed state of a widget, and all other widgets it refers to as well"""
if store is None:
store = dict()
state = widget._get_embed_state(drop_defaults=drop_defaults)
store[widget.model_id] = state
# Loop over all values included in state (i.e. don't consider excluded values):
for ref in _find_widget_refs_by_state(widget, state['state']):
if ref.model_id not in store:
_get_recursive_state(ref, store, drop_defaults=drop_defaults)
return store |
def fill_blind_pores(im):
r"""
Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels
"""
im = sp.copy(im)
holes = find_disconnected_voxels(im)
im[holes] = False
return im | r"""
Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels | Below is the the instruction that describes the task:
### Input:
r"""
Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels
### Response:
def fill_blind_pores(im):
r"""
Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels
"""
im = sp.copy(im)
holes = find_disconnected_voxels(im)
im[holes] = False
return im |
def _generate(num_particles, D, box, rs):
"""Generate a list of `Particle` objects."""
X0 = rs.rand(num_particles) * (box.x2 - box.x1) + box.x1
Y0 = rs.rand(num_particles) * (box.y2 - box.y1) + box.y1
Z0 = rs.rand(num_particles) * (box.z2 - box.z1) + box.z1
return [Particle(D=D, x0=x0, y0=y0, z0=z0)
for x0, y0, z0 in zip(X0, Y0, Z0)] | Generate a list of `Particle` objects. | Below is the the instruction that describes the task:
### Input:
Generate a list of `Particle` objects.
### Response:
def _generate(num_particles, D, box, rs):
"""Generate a list of `Particle` objects."""
X0 = rs.rand(num_particles) * (box.x2 - box.x1) + box.x1
Y0 = rs.rand(num_particles) * (box.y2 - box.y1) + box.y1
Z0 = rs.rand(num_particles) * (box.z2 - box.z1) + box.z1
return [Particle(D=D, x0=x0, y0=y0, z0=z0)
for x0, y0, z0 in zip(X0, Y0, Z0)] |
def get_data(name):
"""
This function extracts the data from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_val : pandas dataframe index
data_val : list
Examples:
>>> # Retrieve the data from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time, data = pytplot.get_data("Variable1")
"""
global data_quants
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
temp_data_quant = data_quants[name]
data_val = temp_data_quant.data.values
time_val = temp_data_quant.data.index
return(time_val, data_val) | This function extracts the data from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_val : pandas dataframe index
data_val : list
Examples:
>>> # Retrieve the data from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time, data = pytplot.get_data("Variable1") | Below is the the instruction that describes the task:
### Input:
This function extracts the data from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_val : pandas dataframe index
data_val : list
Examples:
>>> # Retrieve the data from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time, data = pytplot.get_data("Variable1")
### Response:
def get_data(name):
"""
This function extracts the data from the Tplot Variables stored in memory.
Parameters:
name : str
Name of the tplot variable
Returns:
time_val : pandas dataframe index
data_val : list
Examples:
>>> # Retrieve the data from Variable 1
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> time, data = pytplot.get_data("Variable1")
"""
global data_quants
if name not in data_quants.keys():
print("That name is currently not in pytplot")
return
temp_data_quant = data_quants[name]
data_val = temp_data_quant.data.values
time_val = temp_data_quant.data.index
return(time_val, data_val) |
def write_items(self, calendar):
"""
Write all events to the calendar
"""
for item in self.items:
event = Event()
for ifield, efield in ITEM_EVENT_FIELD_MAP:
val = item.get(ifield)
if val is not None:
event.add(efield, val)
calendar.add_component(event) | Write all events to the calendar | Below is the the instruction that describes the task:
### Input:
Write all events to the calendar
### Response:
def write_items(self, calendar):
"""
Write all events to the calendar
"""
for item in self.items:
event = Event()
for ifield, efield in ITEM_EVENT_FIELD_MAP:
val = item.get(ifield)
if val is not None:
event.add(efield, val)
calendar.add_component(event) |
def default_subreddits(self, *args, **kwargs):
"""Return a get_content generator for the default subreddits.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
url = self.config['default_subreddits']
return self.get_content(url, *args, **kwargs) | Return a get_content generator for the default subreddits.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered. | Below is the the instruction that describes the task:
### Input:
Return a get_content generator for the default subreddits.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
### Response:
def default_subreddits(self, *args, **kwargs):
"""Return a get_content generator for the default subreddits.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
url = self.config['default_subreddits']
return self.get_content(url, *args, **kwargs) |
def load(name, **kwargs):
'''
Loads the configuration provided onto the junos device.
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- path: salt//configs/interface.set
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- template_path: salt//configs/interface.set
- template_vars:
interface_name: lo0
description: Creating interface via SaltStack.
name
Path where the configuration/template file is present. If the file has
a ``*.conf`` extension, the content is treated as text format. If the
file has a ``*.xml`` extension, the content is treated as XML format. If
the file has a ``*.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file is to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses "replace:" statements.
Only those statements under the 'replace' tag will be changed.
format:
Determines the format of the contents.
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1 (default = False)
template_vars
Variables to be passed into the template processing engine in addition
to those present in __pillar__, __opts__, __grains__, etc.
You may reference these variables in your template like so:
{{ template_vars["var_name"] }}
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.load'](name, **kwargs)
return ret | Loads the configuration provided onto the junos device.
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- path: salt//configs/interface.set
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- template_path: salt//configs/interface.set
- template_vars:
interface_name: lo0
description: Creating interface via SaltStack.
name
Path where the configuration/template file is present. If the file has
a ``*.conf`` extension, the content is treated as text format. If the
file has a ``*.xml`` extension, the content is treated as XML format. If
the file has a ``*.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file is to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses "replace:" statements.
Only those statements under the 'replace' tag will be changed.
format:
Determines the format of the contents.
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1 (default = False)
template_vars
Variables to be passed into the template processing engine in addition
to those present in __pillar__, __opts__, __grains__, etc.
You may reference these variables in your template like so:
{{ template_vars["var_name"] }} | Below is the the instruction that describes the task:
### Input:
Loads the configuration provided onto the junos device.
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- path: salt//configs/interface.set
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- template_path: salt//configs/interface.set
- template_vars:
interface_name: lo0
description: Creating interface via SaltStack.
name
Path where the configuration/template file is present. If the file has
a ``*.conf`` extension, the content is treated as text format. If the
file has a ``*.xml`` extension, the content is treated as XML format. If
the file has a ``*.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file is to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses "replace:" statements.
Only those statements under the 'replace' tag will be changed.
format:
Determines the format of the contents.
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1 (default = False)
template_vars
Variables to be passed into the template processing engine in addition
to those present in __pillar__, __opts__, __grains__, etc.
You may reference these variables in your template like so:
{{ template_vars["var_name"] }}
### Response:
def load(name, **kwargs):
'''
Loads the configuration provided onto the junos device.
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- path: salt//configs/interface.set
.. code-block:: yaml
Install the mentioned config:
junos:
- load
- template_path: salt//configs/interface.set
- template_vars:
interface_name: lo0
description: Creating interface via SaltStack.
name
Path where the configuration/template file is present. If the file has
a ``*.conf`` extension, the content is treated as text format. If the
file has a ``*.xml`` extension, the content is treated as XML format. If
the file has a ``*.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file is to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses "replace:" statements.
Only those statements under the 'replace' tag will be changed.
format:
Determines the format of the contents.
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1 (default = False)
template_vars
Variables to be passed into the template processing engine in addition
to those present in __pillar__, __opts__, __grains__, etc.
You may reference these variables in your template like so:
{{ template_vars["var_name"] }}
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.load'](name, **kwargs)
return ret |
def AgregarPrecioClase(self, clase_tabaco, precio, total_kilos=None, total_fardos=None, **kwargs):
"Agrego un PrecioClase a la liq."
precioclase = dict(claseTabaco=clase_tabaco, precio=precio,
totalKilos=total_kilos, totalFardos=total_fardos)
self.solicitud['precioClase'].append(precioclase)
return True | Agrego un PrecioClase a la liq. | Below is the the instruction that describes the task:
### Input:
Agrego un PrecioClase a la liq.
### Response:
def AgregarPrecioClase(self, clase_tabaco, precio, total_kilos=None, total_fardos=None, **kwargs):
"Agrego un PrecioClase a la liq."
precioclase = dict(claseTabaco=clase_tabaco, precio=precio,
totalKilos=total_kilos, totalFardos=total_fardos)
self.solicitud['precioClase'].append(precioclase)
return True |
def commissionerUnregister(self):
"""stop commissioner
Returns:
True: successful to stop commissioner
False: fail to stop commissioner
"""
print '%s call commissionerUnregister' % self.port
cmd = 'commissioner stop'
print cmd
return self.__sendCommand(cmd)[0] == 'Done' | stop commissioner
Returns:
True: successful to stop commissioner
False: fail to stop commissioner | Below is the the instruction that describes the task:
### Input:
stop commissioner
Returns:
True: successful to stop commissioner
False: fail to stop commissioner
### Response:
def commissionerUnregister(self):
"""stop commissioner
Returns:
True: successful to stop commissioner
False: fail to stop commissioner
"""
print '%s call commissionerUnregister' % self.port
cmd = 'commissioner stop'
print cmd
return self.__sendCommand(cmd)[0] == 'Done' |
def getLogger(cls, name=None):
"""
Retrieves the Python native logger
:param name: The name of the logger instance in the VSG namespace (VSG.<name>); a None value will use the VSG root.
:return: The instacne of the Python logger object.
"""
return logging.getLogger("{0}.{1}".format(cls.BASENAME, name) if name else cls.BASENAME) | Retrieves the Python native logger
:param name: The name of the logger instance in the VSG namespace (VSG.<name>); a None value will use the VSG root.
:return: The instacne of the Python logger object. | Below is the the instruction that describes the task:
### Input:
Retrieves the Python native logger
:param name: The name of the logger instance in the VSG namespace (VSG.<name>); a None value will use the VSG root.
:return: The instacne of the Python logger object.
### Response:
def getLogger(cls, name=None):
"""
Retrieves the Python native logger
:param name: The name of the logger instance in the VSG namespace (VSG.<name>); a None value will use the VSG root.
:return: The instacne of the Python logger object.
"""
return logging.getLogger("{0}.{1}".format(cls.BASENAME, name) if name else cls.BASENAME) |
def create(self):
"""
Creates the node.
"""
log.info("{module}: {name} [{id}] created".format(module=self.manager.module_name,
name=self.name,
id=self.id)) | Creates the node. | Below is the the instruction that describes the task:
### Input:
Creates the node.
### Response:
def create(self):
"""
Creates the node.
"""
log.info("{module}: {name} [{id}] created".format(module=self.manager.module_name,
name=self.name,
id=self.id)) |
def create_endpoints_csv_file(self, timeout=-1):
"""
Creates an endpoints CSV file for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Endpoint CSV File Response.
"""
uri = "{}/endpoints/".format(self.data["uri"])
return self._helper.do_post(uri, {}, timeout, None) | Creates an endpoints CSV file for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Endpoint CSV File Response. | Below is the the instruction that describes the task:
### Input:
Creates an endpoints CSV file for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Endpoint CSV File Response.
### Response:
def create_endpoints_csv_file(self, timeout=-1):
"""
Creates an endpoints CSV file for a SAN.
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Endpoint CSV File Response.
"""
uri = "{}/endpoints/".format(self.data["uri"])
return self._helper.do_post(uri, {}, timeout, None) |
def sample_double_norm(mean, std_upper, std_lower, size):
"""Note that this function requires Scipy."""
from scipy.special import erfinv
# There's probably a better way to do this. We first draw percentiles
# uniformly between 0 and 1. We want the peak of the distribution to occur
# at `mean`. However, if we assign 50% of the samples to the lower half
# and 50% to the upper half, the side with the smaller variance will be
# overrepresented because of the 1/sigma normalization of the Gaussian
# PDF. Therefore we need to divide points between the two halves with a
# fraction `cutoff` (defined below) going to the lower half. Having
# partitioned them this way, we can then use the standard Gaussian
# quantile function to go from percentiles to sample values -- except that
# we must remap from [0, cutoff] to [0, 0.5] and from [cutoff, 1] to [0.5,
# 1].
samples = np.empty(size)
percentiles = np.random.uniform(0., 1., size)
cutoff = std_lower / (std_lower + std_upper)
w = (percentiles < cutoff)
percentiles[w] *= 0.5 / cutoff
samples[w] = mean + np.sqrt(2) * std_lower * erfinv(2 * percentiles[w] - 1)
w = ~w
percentiles[w] = 1 - (1 - percentiles[w]) * 0.5 / (1 - cutoff)
samples[w] = mean + np.sqrt(2) * std_upper * erfinv(2 * percentiles[w] - 1)
return samples | Note that this function requires Scipy. | Below is the the instruction that describes the task:
### Input:
Note that this function requires Scipy.
### Response:
def sample_double_norm(mean, std_upper, std_lower, size):
"""Note that this function requires Scipy."""
from scipy.special import erfinv
# There's probably a better way to do this. We first draw percentiles
# uniformly between 0 and 1. We want the peak of the distribution to occur
# at `mean`. However, if we assign 50% of the samples to the lower half
# and 50% to the upper half, the side with the smaller variance will be
# overrepresented because of the 1/sigma normalization of the Gaussian
# PDF. Therefore we need to divide points between the two halves with a
# fraction `cutoff` (defined below) going to the lower half. Having
# partitioned them this way, we can then use the standard Gaussian
# quantile function to go from percentiles to sample values -- except that
# we must remap from [0, cutoff] to [0, 0.5] and from [cutoff, 1] to [0.5,
# 1].
samples = np.empty(size)
percentiles = np.random.uniform(0., 1., size)
cutoff = std_lower / (std_lower + std_upper)
w = (percentiles < cutoff)
percentiles[w] *= 0.5 / cutoff
samples[w] = mean + np.sqrt(2) * std_lower * erfinv(2 * percentiles[w] - 1)
w = ~w
percentiles[w] = 1 - (1 - percentiles[w]) * 0.5 / (1 - cutoff)
samples[w] = mean + np.sqrt(2) * std_upper * erfinv(2 * percentiles[w] - 1)
return samples |
def ungrab_server(self, onerror = None):
"""Release the server if it was previously grabbed by this client."""
request.UngrabServer(display = self.display,
onerror = onerror) | Release the server if it was previously grabbed by this client. | Below is the the instruction that describes the task:
### Input:
Release the server if it was previously grabbed by this client.
### Response:
def ungrab_server(self, onerror = None):
"""Release the server if it was previously grabbed by this client."""
request.UngrabServer(display = self.display,
onerror = onerror) |
def filter_curve(self):
"""filter the poor performing curve
Returns
-------
None
"""
avg = np.sum(self.trial_history) / self.point_num
standard = avg * avg * self.point_num
predict_data = []
tmp_model = []
for i in range(NUM_OF_FUNCTIONS):
var = 0
model = curve_combination_models[i]
for j in range(1, self.point_num + 1):
y = self.predict_y(model, j)
var += (y - self.trial_history[j - 1]) * (y - self.trial_history[j - 1])
if var < standard:
predict_data.append(y)
tmp_model.append(curve_combination_models[i])
median = np.median(predict_data)
std = np.std(predict_data)
for model in tmp_model:
y = self.predict_y(model, self.target_pos)
epsilon = self.point_num / 10 * std
if y < median + epsilon and y > median - epsilon:
self.effective_model.append(model)
self.effective_model_num = len(self.effective_model)
logger.info('List of effective model: ', self.effective_model) | filter the poor performing curve
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
filter the poor performing curve
Returns
-------
None
### Response:
def filter_curve(self):
"""filter the poor performing curve
Returns
-------
None
"""
avg = np.sum(self.trial_history) / self.point_num
standard = avg * avg * self.point_num
predict_data = []
tmp_model = []
for i in range(NUM_OF_FUNCTIONS):
var = 0
model = curve_combination_models[i]
for j in range(1, self.point_num + 1):
y = self.predict_y(model, j)
var += (y - self.trial_history[j - 1]) * (y - self.trial_history[j - 1])
if var < standard:
predict_data.append(y)
tmp_model.append(curve_combination_models[i])
median = np.median(predict_data)
std = np.std(predict_data)
for model in tmp_model:
y = self.predict_y(model, self.target_pos)
epsilon = self.point_num / 10 * std
if y < median + epsilon and y > median - epsilon:
self.effective_model.append(model)
self.effective_model_num = len(self.effective_model)
logger.info('List of effective model: ', self.effective_model) |
def add_item(self, text, font=("default", 12, "bold"), backgroundcolor="yellow", textcolor="black",
highlightcolor="blue"):
"""
Add a new item on the Canvas.
:param text: text to display
:type text: str
:param font: font of the text
:type font: tuple or :class:`~tkinter.font.Font`
:param backgroundcolor: background color
:type backgroundcolor: str
:param textcolor: text color
:type textcolor: str
:param highlightcolor: the color of the text when the item is selected
:type highlightcolor: str
"""
item = self.canvas.create_text(0, 0, anchor=tk.NW, text=text, font=font, fill=textcolor, tag="item")
rectangle = self.canvas.create_rectangle(self.canvas.bbox(item), fill=backgroundcolor)
self.canvas.tag_lower(rectangle, item)
self.items[item] = rectangle
if callable(self._callback_add):
self._callback_add(item, rectangle)
self.item_colors[item] = (backgroundcolor, textcolor, highlightcolor) | Add a new item on the Canvas.
:param text: text to display
:type text: str
:param font: font of the text
:type font: tuple or :class:`~tkinter.font.Font`
:param backgroundcolor: background color
:type backgroundcolor: str
:param textcolor: text color
:type textcolor: str
:param highlightcolor: the color of the text when the item is selected
:type highlightcolor: str | Below is the the instruction that describes the task:
### Input:
Add a new item on the Canvas.
:param text: text to display
:type text: str
:param font: font of the text
:type font: tuple or :class:`~tkinter.font.Font`
:param backgroundcolor: background color
:type backgroundcolor: str
:param textcolor: text color
:type textcolor: str
:param highlightcolor: the color of the text when the item is selected
:type highlightcolor: str
### Response:
def add_item(self, text, font=("default", 12, "bold"), backgroundcolor="yellow", textcolor="black",
highlightcolor="blue"):
"""
Add a new item on the Canvas.
:param text: text to display
:type text: str
:param font: font of the text
:type font: tuple or :class:`~tkinter.font.Font`
:param backgroundcolor: background color
:type backgroundcolor: str
:param textcolor: text color
:type textcolor: str
:param highlightcolor: the color of the text when the item is selected
:type highlightcolor: str
"""
item = self.canvas.create_text(0, 0, anchor=tk.NW, text=text, font=font, fill=textcolor, tag="item")
rectangle = self.canvas.create_rectangle(self.canvas.bbox(item), fill=backgroundcolor)
self.canvas.tag_lower(rectangle, item)
self.items[item] = rectangle
if callable(self._callback_add):
self._callback_add(item, rectangle)
self.item_colors[item] = (backgroundcolor, textcolor, highlightcolor) |
def code2color(color_string):
"""Returns wx.Colour from a string of a 3-tuple of floats in [0.0, 1.0]"""
color_tuple = ast.literal_eval(color_string)
color_tuple_int = map(lambda x: int(x * 255.0), color_tuple)
return wx.Colour(*color_tuple_int) | Returns wx.Colour from a string of a 3-tuple of floats in [0.0, 1.0] | Below is the the instruction that describes the task:
### Input:
Returns wx.Colour from a string of a 3-tuple of floats in [0.0, 1.0]
### Response:
def code2color(color_string):
"""Returns wx.Colour from a string of a 3-tuple of floats in [0.0, 1.0]"""
color_tuple = ast.literal_eval(color_string)
color_tuple_int = map(lambda x: int(x * 255.0), color_tuple)
return wx.Colour(*color_tuple_int) |
def get_local(self, name, recurse=True):
"""Get the local field (search for it) from the scope stack. An alias
for ``get_var``
:name: The name of the local field
"""
self._dlog("getting local '{}'".format(name))
return self._search("vars", name, recurse) | Get the local field (search for it) from the scope stack. An alias
for ``get_var``
:name: The name of the local field | Below is the the instruction that describes the task:
### Input:
Get the local field (search for it) from the scope stack. An alias
for ``get_var``
:name: The name of the local field
### Response:
def get_local(self, name, recurse=True):
"""Get the local field (search for it) from the scope stack. An alias
for ``get_var``
:name: The name of the local field
"""
self._dlog("getting local '{}'".format(name))
return self._search("vars", name, recurse) |
def _set_def_prompt(self):
"""Sets the default prompt to match the currently active unit test."""
if len(self.active) > 15:
ids = self.active.split(".")
if len(ids) > 2:
module, executable, compiler = ids
else:
module, executable = ids
compiler = "g"
self.prompt = "({}*.{}*.{}:{})".format(module[0:6], executable[0:6], compiler, self.group)
else:
self.prompt = "({}:{})".format(self.active, self.group) | Sets the default prompt to match the currently active unit test. | Below is the the instruction that describes the task:
### Input:
Sets the default prompt to match the currently active unit test.
### Response:
def _set_def_prompt(self):
"""Sets the default prompt to match the currently active unit test."""
if len(self.active) > 15:
ids = self.active.split(".")
if len(ids) > 2:
module, executable, compiler = ids
else:
module, executable = ids
compiler = "g"
self.prompt = "({}*.{}*.{}:{})".format(module[0:6], executable[0:6], compiler, self.group)
else:
self.prompt = "({}:{})".format(self.active, self.group) |
def timestamp_from_datetime(date_time):
"""Returns POSIX timestamp as float"""
if date_time.tzinfo is None:
return time.mktime((date_time.year, date_time.month, date_time.day, date_time.hour,
date_time.minute, date_time.second,
-1, -1, -1)) + date_time.microsecond / 1e6
return (date_time - _EPOCH).total_seconds() | Returns POSIX timestamp as float | Below is the the instruction that describes the task:
### Input:
Returns POSIX timestamp as float
### Response:
def timestamp_from_datetime(date_time):
    """Return *date_time* as a POSIX timestamp (float seconds)."""
    if date_time.tzinfo is not None:
        # Timezone-aware: subtract the epoch directly.
        return (date_time - _EPOCH).total_seconds()
    # Naive datetimes are interpreted as local time via mktime();
    # microseconds are added separately since mktime only takes whole seconds.
    fields = (date_time.year, date_time.month, date_time.day,
              date_time.hour, date_time.minute, date_time.second,
              -1, -1, -1)
    return time.mktime(fields) + date_time.microsecond / 1e6
def write_base (self, url_data):
"""Write url_data.base_ref."""
self.write(self.part("base") + self.spaces("base"))
self.writeln(url_data.base_ref, color=self.colorbase) | Write url_data.base_ref. | Below is the the instruction that describes the task:
### Input:
Write url_data.base_ref.
### Response:
def write_base(self, url_data):
    """Write url_data.base_ref, preceded by the 'base' label and padding."""
    prefix = self.part("base") + self.spaces("base")
    self.write(prefix)
    self.writeln(url_data.base_ref, color=self.colorbase)
def error_handler(f):
"""Handle uncaught OAuth errors."""
@wraps(f)
def decorated(*args, **kwargs):
try:
return f(*args, **kwargs)
except OAuth2Error as e:
# Only FatalClientError are handled by Flask-OAuthlib (as these
# errors should not be redirect back to the client - see
# http://tools.ietf.org/html/rfc6749#section-4.2.2.1)
if hasattr(e, 'redirect_uri'):
return redirect(e.in_uri(e.redirect_uri))
else:
return redirect(e.in_uri(oauth2.error_uri))
return decorated | Handle uncaught OAuth errors. | Below is the the instruction that describes the task:
### Input:
Handle uncaught OAuth errors.
### Response:
def error_handler(f):
    """Decorator that converts uncaught OAuth errors into redirects."""
    @wraps(f)
    def decorated(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except OAuth2Error as e:
            # Only FatalClientError are handled by Flask-OAuthlib (as these
            # errors should not be redirected back to the client - see
            # http://tools.ietf.org/html/rfc6749#section-4.2.2.1)
            if hasattr(e, 'redirect_uri'):
                destination = e.in_uri(e.redirect_uri)
            else:
                destination = e.in_uri(oauth2.error_uri)
            return redirect(destination)
    return decorated
def send_text(self, chat_id, content, safe=0):
"""
发送文本消息
详情请参考:https://work.weixin.qq.com/api/doc#90000/90135/90248/文本消息/
:param chat_id: 群聊id
:param content: 消息内容
:param safe: 表示是否是保密消息,0表示否,1表示是,默认0
:return:
"""
return self.send(chat_id, 'text', safe=safe, content=content) | 发送文本消息
详情请参考:https://work.weixin.qq.com/api/doc#90000/90135/90248/文本消息/
:param chat_id: 群聊id
:param content: 消息内容
:param safe: 表示是否是保密消息,0表示否,1表示是,默认0
:return: | Below is the the instruction that describes the task:
### Input:
发送文本消息
详情请参考:https://work.weixin.qq.com/api/doc#90000/90135/90248/文本消息/
:param chat_id: 群聊id
:param content: 消息内容
:param safe: 表示是否是保密消息,0表示否,1表示是,默认0
:return:
### Response:
def send_text(self, chat_id, content, safe=0):
    """Send a plain-text message to a group chat.
    Reference: https://work.weixin.qq.com/api/doc#90000/90135/90248/文本消息/
    :param chat_id: group chat id
    :param content: message body
    :param safe: 1 for a confidential message, 0 otherwise (default 0)
    :return: whatever :meth:`send` returns
    """
    return self.send(chat_id, 'text', safe=safe, content=content)
def request_announcement_view(request):
"""The request announcement page."""
if request.method == "POST":
form = AnnouncementRequestForm(request.POST)
logger.debug(form)
logger.debug(form.data)
if form.is_valid():
teacher_objs = form.cleaned_data["teachers_requested"]
logger.debug("teacher objs:")
logger.debug(teacher_objs)
if len(teacher_objs) > 2:
messages.error(request, "Please select a maximum of 2 teachers to approve this post.")
else:
obj = form.save(commit=True)
obj.user = request.user
# SAFE HTML
obj.content = safe_html(obj.content)
obj.save()
ann = AnnouncementRequest.objects.get(id=obj.id)
logger.debug(teacher_objs)
approve_self = False
for teacher in teacher_objs:
ann.teachers_requested.add(teacher)
if teacher == request.user:
approve_self = True
ann.save()
if approve_self:
ann.teachers_approved.add(teacher)
ann.save()
if settings.SEND_ANNOUNCEMENT_APPROVAL:
admin_request_announcement_email(request, form, ann)
ann.admin_email_sent = True
ann.save()
return redirect("request_announcement_success_self")
else:
if settings.SEND_ANNOUNCEMENT_APPROVAL:
request_announcement_email(request, form, obj)
return redirect("request_announcement_success")
return redirect("index")
else:
messages.error(request, "Error adding announcement request")
else:
form = AnnouncementRequestForm()
return render(request, "announcements/request.html", {"form": form, "action": "add"}) | The request announcement page. | Below is the the instruction that describes the task:
### Input:
The request announcement page.
### Response:
def request_announcement_view(request):
    """The request announcement page.

    GET renders an empty AnnouncementRequestForm; POST validates it, saves
    the request, records which teachers must approve it, and sends the
    relevant notification emails.
    """
    if request.method == "POST":
        form = AnnouncementRequestForm(request.POST)
        logger.debug(form)
        logger.debug(form.data)
        if form.is_valid():
            teacher_objs = form.cleaned_data["teachers_requested"]
            logger.debug("teacher objs:")
            logger.debug(teacher_objs)
            if len(teacher_objs) > 2:
                # At most two approving teachers are allowed per request.
                messages.error(request, "Please select a maximum of 2 teachers to approve this post.")
            else:
                obj = form.save(commit=True)
                obj.user = request.user
                # SAFE HTML: sanitize user-supplied markup before storing.
                obj.content = safe_html(obj.content)
                obj.save()
                ann = AnnouncementRequest.objects.get(id=obj.id)
                logger.debug(teacher_objs)
                # Track whether the submitter listed themselves as an approver.
                approve_self = False
                for teacher in teacher_objs:
                    ann.teachers_requested.add(teacher)
                    if teacher == request.user:
                        approve_self = True
                ann.save()
                if approve_self:
                    # NOTE(review): `teacher` here is the last loop variable, not
                    # necessarily request.user — confirm this is intentional.
                    ann.teachers_approved.add(teacher)
                    ann.save()
                    if settings.SEND_ANNOUNCEMENT_APPROVAL:
                        admin_request_announcement_email(request, form, ann)
                        ann.admin_email_sent = True
                        ann.save()
                    return redirect("request_announcement_success_self")
                else:
                    if settings.SEND_ANNOUNCEMENT_APPROVAL:
                        request_announcement_email(request, form, obj)
                    return redirect("request_announcement_success")
                # NOTE(review): both branches above return, so this line appears
                # unreachable — confirm and consider removing.
                return redirect("index")
        else:
            messages.error(request, "Error adding announcement request")
    else:
        form = AnnouncementRequestForm()
    return render(request, "announcements/request.html", {"form": form, "action": "add"})
def update(ctx, migrate=False):
'''Perform a development update'''
msg = 'Update all dependencies'
if migrate:
msg += ' and migrate data'
header(msg)
info('Updating Python dependencies')
lrun('pip install -r requirements/develop.pip')
lrun('pip install -e .')
info('Updating JavaScript dependencies')
lrun('npm install')
if migrate:
info('Migrating database')
lrun('udata db migrate') | Perform a development update | Below is the the instruction that describes the task:
### Input:
Perform a development update
### Response:
def update(ctx, migrate=False):
    '''Perform a development update'''
    banner = 'Update all dependencies'
    if migrate:
        banner = banner + ' and migrate data'
    header(banner)
    # Python dependencies first, then the project itself in editable mode.
    info('Updating Python dependencies')
    lrun('pip install -r requirements/develop.pip')
    lrun('pip install -e .')
    info('Updating JavaScript dependencies')
    lrun('npm install')
    if migrate:
        info('Migrating database')
        lrun('udata db migrate')
def _init_module_cache():
"""
Module caching, it helps with not having to import again and again same modules.
@return: boolean, True if module caching has been done, False if module caching was already done.
"""
# While there are not loaded modules, load these ones
if len(FieldTranslation._modules) < len(FieldTranslation._model_module_paths):
for module_path in FieldTranslation._model_module_paths:
FieldTranslation._modules[module_path] = importlib.import_module(module_path)
return True
return False | Module caching, it helps with not having to import again and again same modules.
@return: boolean, True if module caching has been done, False if module caching was already done. | Below is the the instruction that describes the task:
### Input:
Module caching, it helps with not having to import again and again same modules.
@return: boolean, True if module caching has been done, False if module caching was already done.
### Response:
def _init_module_cache():
    """
    Populate FieldTranslation's module cache so repeated imports are avoided.
    @return: boolean, True if module caching has been done, False if module caching was already done.
    """
    cache = FieldTranslation._modules
    paths = FieldTranslation._model_module_paths
    # Already fully populated: nothing to do.
    if len(cache) >= len(paths):
        return False
    for module_path in paths:
        cache[module_path] = importlib.import_module(module_path)
    return True
return False |
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)] | Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off. | Below is the the instruction that describes the task:
### Input:
Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
### Response:
def menu(self, choices, prompt='Please choose from the provided options:',
         error='Invalid choice', intro=None, strict=True, default=None,
         numerator=lambda x: [i + 1 for i in range(x)],
         formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
         clean=utils.safeint):
    """ Print a menu
    The choices must be an iterable of two-tuples where the first value is
    the value of the menu item, and the second is the label for that
    matches the value.
    The menu will be printed with numeric choices. For example::
        1) foo
        2) bar
    Formatting of the number is controlled by the formatter function which
    can be overridden by passing the ``formatter`` argument.
    The numbers used for the menu are generated using the numerator
    function which can be specified using the ``numerator`` function. This
    function must take the number of choices and return the same number of
    items that will be used as choice characters as a list.
    The cleaner function is passed to ``pvpl()`` method can be customized
    using ``clean`` argument. This function should generally be customized
    whenever ``numerator`` is customized, as default cleaner converts
    input to integers to match the default numerator.
    Optional ``intro`` argument can be passed to print a message above the
    menu.
    The return value of this method is the value user has chosen. The
    prompt will keep asking the user for input until a valid choice is
    selected. Each time an invalid selection is made, error message is
    printed. This message can be customized using ``error`` argument.
    If ``strict`` argument is set, then only values in choices are allowed,
    otherwise any value will be allowed. The ``default`` argument can be
    used to define what value is returned in case user select an invalid
    value when strict checking is off.
    """
    # Selector tokens (default: 1..N) and the parallel value list.
    numbers = list(numerator(len(choices)))
    labels = (label for _, label in choices)  # lazy; consumed once by zip below
    values = [value for value, _ in choices]
    # Print intro and menu itself
    if intro:
        self.pstd('\n' + utils.rewrap_long(intro))
    for n, label in zip(numbers, labels):
        self.pstd(formatter(n, label))
    # Define the validator: a choice is valid iff it is one of the tokens.
    validator = lambda x: x in numbers
    val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
                    strict=strict, default=default)
    # Non-strict mode may return the default verbatim (it has no mapped value).
    if not strict and val == default:
        return val
    # Map the chosen token back to the corresponding choice value.
    return values[numbers.index(val)]
def override_environment(settings, **kwargs):
# type: (Settings, **str) -> Generator
"""
Override env vars and reload the Settings object
NOTE:
Obviously this context has to be in place before you import any
module which reads env values at import time.
NOTE:
The values in `kwargs` must be strings else you will get a cryptic:
TypeError: execve() arg 3 contains a non-string value
"""
old_env = os.environ.copy()
os.environ.update(kwargs)
settings._reload()
try:
yield
except Exception:
raise
finally:
for key in kwargs.keys():
del os.environ[key]
os.environ.update(old_env)
settings._reload() | Override env vars and reload the Settings object
NOTE:
Obviously this context has to be in place before you import any
module which reads env values at import time.
NOTE:
The values in `kwargs` must be strings else you will get a cryptic:
TypeError: execve() arg 3 contains a non-string value | Below is the the instruction that describes the task:
### Input:
Override env vars and reload the Settings object
NOTE:
Obviously this context has to be in place before you import any
module which reads env values at import time.
NOTE:
The values in `kwargs` must be strings else you will get a cryptic:
TypeError: execve() arg 3 contains a non-string value
### Response:
def override_environment(settings, **kwargs):
    # type: (Settings, **str) -> Generator
    """
    Override env vars and reload the Settings object
    NOTE:
        Obviously this context has to be in place before you import any
        module which reads env values at import time.
    NOTE:
        The values in `kwargs` must be strings else you will get a cryptic:
        TypeError: execve() arg 3 contains a non-string value
    """
    old_env = os.environ.copy()
    os.environ.update(kwargs)
    settings._reload()
    try:
        yield
    finally:
        # Remove the keys we added; pop() tolerates keys that the code
        # inside the context may itself have deleted (del would KeyError).
        for key in kwargs:
            os.environ.pop(key, None)
        # Restore any pre-existing values we overwrote.
        os.environ.update(old_env)
        settings._reload()
def do_unfullscreen(self, widget):
"""
Widget Action to set Windowed Mode.
"""
self.unfullscreen()
self.is_fullscreen = False
self.bot._screen_ratio = None | Widget Action to set Windowed Mode. | Below is the the instruction that describes the task:
### Input:
Widget Action to set Windowed Mode.
### Response:
def do_unfullscreen(self, widget):
    """
    Widget action: leave fullscreen and return to windowed mode.
    """
    self.unfullscreen()
    # Reset the cached screen ratio; windowed size differs from fullscreen.
    self.bot._screen_ratio = None
    self.is_fullscreen = False
def get_contact_list(self, list_filter, skip=None, count=None, scope='graph/read'):
"""
Retrieve the Mxit user's full contact list
User authentication required with the following scope: 'graph/read'
"""
params = {
'filter': list_filter
}
if skip:
params['skip'] = skip
if count:
params['count'] = count
contact_list = _get(
token=self.oauth.get_user_token(scope),
uri='/user/socialgraph/contactlist?' + urllib.urlencode(params)
)
try:
return json.loads(contact_list)
except:
raise MxitAPIException('Error parsing contact_list data') | Retrieve the Mxit user's full contact list
User authentication required with the following scope: 'graph/read' | Below is the the instruction that describes the task:
### Input:
Retrieve the Mxit user's full contact list
User authentication required with the following scope: 'graph/read'
### Response:
def get_contact_list(self, list_filter, skip=None, count=None, scope='graph/read'):
    """
    Retrieve the Mxit user's full contact list
    User authentication required with the following scope: 'graph/read'
    :param list_filter: server-side filter for the contact list
    :param skip: number of entries to skip (paging); omitted when falsy
    :param count: maximum number of entries to return; omitted when falsy
    :param scope: OAuth scope used to obtain the user token
    :raises MxitAPIException: if the response body is not valid JSON
    """
    params = {
        'filter': list_filter
    }
    if skip:
        params['skip'] = skip
    if count:
        params['count'] = count
    contact_list = _get(
        token=self.oauth.get_user_token(scope),
        uri='/user/socialgraph/contactlist?' + urllib.urlencode(params)
    )
    try:
        return json.loads(contact_list)
    except ValueError:
        # json.loads raises ValueError on malformed input; the original bare
        # `except:` would also have swallowed KeyboardInterrupt/SystemExit.
        raise MxitAPIException('Error parsing contact_list data')
def from_dict(self, d):
"""
Set this person from dict
:param d: Dictionary representing a person ('sitting'[, 'id'])
:type d: dict
:rtype: Person
:raises KeyError: 'sitting' not set
"""
self.sitting = d['sitting']
self.id = d.get('id', None)
return self | Set this person from dict
:param d: Dictionary representing a person ('sitting'[, 'id'])
:type d: dict
:rtype: Person
:raises KeyError: 'sitting' not set | Below is the the instruction that describes the task:
### Input:
Set this person from dict
:param d: Dictionary representing a person ('sitting'[, 'id'])
:type d: dict
:rtype: Person
:raises KeyError: 'sitting' not set
### Response:
def from_dict(self, d):
    """
    Populate this person from a dict and return self.
    :param d: Dictionary representing a person ('sitting'[, 'id'])
    :type d: dict
    :rtype: Person
    :raises KeyError: 'sitting' not set
    """
    self.sitting = d['sitting']  # mandatory key
    self.id = d.get('id')        # optional; defaults to None
    return self
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except Exception:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
# remove the node
if com._all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop) | Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store | Below is the the instruction that describes the task:
### Input:
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
### Response:
def remove(self, key, where=None, start=None, stop=None):
    """
    Remove pandas object partially by specifying the where condition
    Parameters
    ----------
    key : string
        Node to remove or delete rows from
    where : list of Term (or convertible) objects, optional
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection
    Returns
    -------
    number of rows removed (or None if not a Table)
    Exceptions
    ----------
    raises KeyError if key is not a valid store
    """
    where = _ensure_term(where, scope_level=1)
    try:
        s = self.get_storer(key)
    except KeyError:
        # the key is not a valid store, re-raising KeyError
        raise
    except Exception:
        # get_storer failed for another reason: the node exists but is not a
        # recognized storer (e.g. a group with children). Removing such a
        # node wholesale is allowed, but a `where` clause makes no sense.
        if where is not None:
            raise ValueError(
                "trying to remove a node with a non-None where clause!")
        # we are actually trying to remove a node (with children)
        s = self.get_node(key)
        if s is not None:
            s._f_remove(recursive=True)
        return None
    # remove the node entirely when no selection criteria were given
    if com._all_none(where, start, stop):
        s.group._f_remove(recursive=True)
    # delete from the table
    else:
        # row-wise deletion is only meaningful for table-format storers
        if not s.is_table:
            raise ValueError(
                'can only remove with where on objects written as tables')
        return s.delete(where=where, start=start, stop=stop)
def AddList(self, listName, description, templateID):
"""Create a new List
Provide: List Name, List Description, and List Template
Templates Include:
Announcements
Contacts
Custom List
Custom List in Datasheet View
DataSources
Discussion Board
Document Library
Events
Form Library
Issues
Links
Picture Library
Survey
Tasks
"""
templateIDs = {'Announcements': '104',
'Contacts': '105',
'Custom List': '100',
'Custom List in Datasheet View': '120',
'DataSources': '110',
'Discussion Board': '108',
'Document Library': '101',
'Events': '106',
'Form Library': '115',
'Issues': '1100',
'Links': '103',
'Picture Library': '109',
'Survey': '102',
'Tasks': '107'}
IDnums = [100, 101, 102, 103, 104, 105, 106,
107, 108, 109, 110, 115, 120, 1100]
# Let's automatically convert the different
# ways we can select the templateID
if type(templateID) == int:
templateID = str(templateID)
elif type(templateID) == str:
if templateID.isdigit():
pass
else:
templateID = templateIDs[templateID]
# Build Request
soap_request = soap('AddList')
soap_request.add_parameter('listName', listName)
soap_request.add_parameter('description', description)
soap_request.add_parameter('templateID', templateID)
self.last_request = str(soap_request)
# Send Request
response = self._session.post(url=self._url('Lists'),
headers=self._headers('AddList'),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
# Parse Request
print(response)
if response == 200:
return response.text
else:
return response | Create a new List
Provide: List Name, List Description, and List Template
Templates Include:
Announcements
Contacts
Custom List
Custom List in Datasheet View
DataSources
Discussion Board
Document Library
Events
Form Library
Issues
Links
Picture Library
Survey
Tasks | Below is the the instruction that describes the task:
### Input:
Create a new List
Provide: List Name, List Description, and List Template
Templates Include:
Announcements
Contacts
Custom List
Custom List in Datasheet View
DataSources
Discussion Board
Document Library
Events
Form Library
Issues
Links
Picture Library
Survey
Tasks
### Response:
def AddList(self, listName, description, templateID):
    """Create a new List
    Provide: List Name, List Description, and List Template
    Templates Include:
        Announcements
        Contacts
        Custom List
        Custom List in Datasheet View
        DataSources
        Discussion Board
        Document Library
        Events
        Form Library
        Issues
        Links
        Picture Library
        Survey
        Tasks
    :param listName: display name of the new list
    :param description: description text for the new list
    :param templateID: template as an int, a numeric string, or a name
        from the table above
    :return: response body text on HTTP 200, otherwise the raw response
    """
    templateIDs = {'Announcements': '104',
                   'Contacts': '105',
                   'Custom List': '100',
                   'Custom List in Datasheet View': '120',
                   'DataSources': '110',
                   'Discussion Board': '108',
                   'Document Library': '101',
                   'Events': '106',
                   'Form Library': '115',
                   'Issues': '1100',
                   'Links': '103',
                   'Picture Library': '109',
                   'Survey': '102',
                   'Tasks': '107'}
    # Normalise templateID: accept an int, a numeric string, or a template
    # name (mapped through templateIDs).
    if type(templateID) == int:
        templateID = str(templateID)
    elif type(templateID) == str:
        if templateID.isdigit():
            pass
        else:
            templateID = templateIDs[templateID]
    # Build Request
    soap_request = soap('AddList')
    soap_request.add_parameter('listName', listName)
    soap_request.add_parameter('description', description)
    soap_request.add_parameter('templateID', templateID)
    self.last_request = str(soap_request)
    # Send Request
    response = self._session.post(url=self._url('Lists'),
                                  headers=self._headers('AddList'),
                                  data=str(soap_request),
                                  verify=self._verify_ssl,
                                  timeout=self.timeout)
    # Parse Request
    # Fixed: the original compared the Response object itself to 200
    # (`response == 200`), which is always False for requests.Response,
    # so the success branch was dead code. Check the status code instead.
    if response.status_code == 200:
        return response.text
    else:
        return response
def senaite_url_fetcher(url):
"""Uses plone.subrequest to fetch an internal image resource.
If the URL points to an external resource, the URL is handed
to weasyprint.default_url_fetcher.
Please see these links for details:
- https://github.com/plone/plone.subrequest
- https://pypi.python.org/pypi/plone.subrequest
- https://github.com/senaite/senaite.core/issues/538
:returns: A dict with the following keys:
* One of ``string`` (a byte string) or ``file_obj``
(a file-like object)
* Optionally: ``mime_type``, a MIME type extracted e.g. from a
*Content-Type* header. If not provided, the type is guessed from the
file extension in the URL.
* Optionally: ``encoding``, a character encoding extracted e.g. from a
*charset* parameter in a *Content-Type* header
* Optionally: ``redirected_url``, the actual URL of the resource
if there were e.g. HTTP redirects.
* Optionally: ``filename``, the filename of the resource. Usually
derived from the *filename* parameter in a *Content-Disposition*
header
If a ``file_obj`` key is given, it is the caller’s responsibility
to call ``file_obj.close()``.
"""
logger.info("Fetching URL '{}' for WeasyPrint".format(url))
# get the pyhsical path from the URL
request = api.get_request()
host = request.get_header("HOST")
path = "/".join(request.physicalPathFromURL(url))
# fetch the object by sub-request
portal = api.get_portal()
context = portal.restrictedTraverse(path, None)
# We double check here to avoid an edge case, where we have the same path
# as well in our local site, e.g. we have `/senaite/img/systems/senaite.png`,
# but the user requested http://www.ridingbytes.com/img/systems/senaite.png:
#
# "/".join(request.physicalPathFromURL("http://www.ridingbytes.com/img/systems/senaite.png"))
# '/senaite/img/systems/senaite.png'
if context is None or host not in url:
logger.info("URL is external, passing over to the default URL fetcher...")
return default_url_fetcher(url)
logger.info("URL is local, fetching data by path '{}' via subrequest".format(path))
# get the data via an authenticated subrequest
response = subrequest(path)
# Prepare the return data as required by WeasyPrint
string = response.getBody()
filename = url.split("/")[-1]
mime_type = mimetypes.guess_type(url)[0]
redirected_url = url
return {
"string": string,
"filename": filename,
"mime_type": mime_type,
"redirected_url": redirected_url,
} | Uses plone.subrequest to fetch an internal image resource.
If the URL points to an external resource, the URL is handed
to weasyprint.default_url_fetcher.
Please see these links for details:
- https://github.com/plone/plone.subrequest
- https://pypi.python.org/pypi/plone.subrequest
- https://github.com/senaite/senaite.core/issues/538
:returns: A dict with the following keys:
* One of ``string`` (a byte string) or ``file_obj``
(a file-like object)
* Optionally: ``mime_type``, a MIME type extracted e.g. from a
*Content-Type* header. If not provided, the type is guessed from the
file extension in the URL.
* Optionally: ``encoding``, a character encoding extracted e.g. from a
*charset* parameter in a *Content-Type* header
* Optionally: ``redirected_url``, the actual URL of the resource
if there were e.g. HTTP redirects.
* Optionally: ``filename``, the filename of the resource. Usually
derived from the *filename* parameter in a *Content-Disposition*
header
If a ``file_obj`` key is given, it is the caller’s responsibility
to call ``file_obj.close()``. | Below is the the instruction that describes the task:
### Input:
Uses plone.subrequest to fetch an internal image resource.
If the URL points to an external resource, the URL is handed
to weasyprint.default_url_fetcher.
Please see these links for details:
- https://github.com/plone/plone.subrequest
- https://pypi.python.org/pypi/plone.subrequest
- https://github.com/senaite/senaite.core/issues/538
:returns: A dict with the following keys:
* One of ``string`` (a byte string) or ``file_obj``
(a file-like object)
* Optionally: ``mime_type``, a MIME type extracted e.g. from a
*Content-Type* header. If not provided, the type is guessed from the
file extension in the URL.
* Optionally: ``encoding``, a character encoding extracted e.g. from a
*charset* parameter in a *Content-Type* header
* Optionally: ``redirected_url``, the actual URL of the resource
if there were e.g. HTTP redirects.
* Optionally: ``filename``, the filename of the resource. Usually
derived from the *filename* parameter in a *Content-Disposition*
header
If a ``file_obj`` key is given, it is the caller’s responsibility
to call ``file_obj.close()``.
### Response:
def senaite_url_fetcher(url):
    """Fetch an image resource for WeasyPrint.

    Resources living inside the portal are retrieved through an
    authenticated ``plone.subrequest``; any other URL is delegated to
    ``weasyprint.default_url_fetcher``.

    Background:

    - https://github.com/plone/plone.subrequest
    - https://pypi.python.org/pypi/plone.subrequest
    - https://github.com/senaite/senaite.core/issues/538

    :returns: A dict with the following keys:

        * One of ``string`` (a byte string) or ``file_obj``
          (a file-like object)

        * Optionally: ``mime_type``, a MIME type extracted e.g. from a
          *Content-Type* header. If not provided, the type is guessed from the
          file extension in the URL.

        * Optionally: ``encoding``, a character encoding extracted e.g. from a
          *charset* parameter in a *Content-Type* header

        * Optionally: ``redirected_url``, the actual URL of the resource
          if there were e.g. HTTP redirects.

        * Optionally: ``filename``, the filename of the resource. Usually
          derived from the *filename* parameter in a *Content-Disposition*
          header

        If a ``file_obj`` key is given, it is the caller's responsibility
        to call ``file_obj.close()``.
    """
    logger.info("Fetching URL '{}' for WeasyPrint".format(url))

    # Resolve the physical path of the requested resource.
    zope_request = api.get_request()
    host_header = zope_request.get_header("HOST")
    physical_path = "/".join(zope_request.physicalPathFromURL(url))

    # Try to traverse to the object inside the portal.
    site = api.get_portal()
    target = site.restrictedTraverse(physical_path, None)

    # Guard against the edge case where an external URL maps onto a path
    # that also exists locally: e.g. we have
    # `/senaite/img/systems/senaite.png`, but the user requested
    # http://www.ridingbytes.com/img/systems/senaite.png -- because
    #
    #   "/".join(request.physicalPathFromURL("http://www.ridingbytes.com/img/systems/senaite.png"))
    #   '/senaite/img/systems/senaite.png'
    #
    # the host check ensures we only serve genuinely local resources.
    if target is None or host_header not in url:
        logger.info("URL is external, passing over to the default URL fetcher...")
        return default_url_fetcher(url)

    logger.info("URL is local, fetching data by path '{}' via subrequest".format(physical_path))

    # Fetch the data via an authenticated subrequest.
    payload = subrequest(physical_path)

    # Assemble the return structure expected by WeasyPrint.
    return {
        "string": payload.getBody(),
        "filename": url.split("/")[-1],
        "mime_type": mimetypes.guess_type(url)[0],
        "redirected_url": url,
    }
def logChange(self, nowTime, sig, nextVal):
"""
This method is called for every value change of any signal.
"""
try:
self.vcdWriter.logChange(nowTime, sig, nextVal)
except KeyError:
# not every signal has to be registered
pass | This method is called for every value change of any signal. | Below is the instruction that describes the task:
### Input:
This method is called for every value change of any signal.
### Response:
def logChange(self, nowTime, sig, nextVal):
    """Record a single signal value change in the VCD output.

    Invoked for every value change of any signal; changes of signals
    that were never registered with the writer are silently dropped.

    :param nowTime: simulation time of the change
    :param sig: the signal whose value changed
    :param nextVal: the new value of the signal
    """
    writer = self.vcdWriter
    try:
        writer.logChange(nowTime, sig, nextVal)
    except KeyError:
        # Not every signal has to be registered -- ignore unknown ones.
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.