code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _set_listener(instance, obs):
"""Add listeners to a HasProperties instance"""
if obs.names is everything:
names = list(instance._props)
else:
names = obs.names
for name in names:
if name not in instance._listeners:
instance._listeners[name] = {typ: [] for typ in LISTENER_TYPES}
instance._listeners[name][obs.mode] += [obs] | Add listeners to a HasProperties instance | Below is the the instruction that describes the task:
### Input:
Add listeners to a HasProperties instance
### Response:
def _set_listener(instance, obs):
"""Add listeners to a HasProperties instance"""
if obs.names is everything:
names = list(instance._props)
else:
names = obs.names
for name in names:
if name not in instance._listeners:
instance._listeners[name] = {typ: [] for typ in LISTENER_TYPES}
instance._listeners[name][obs.mode] += [obs] |
def get_any_nt_unit_rule(g):
"""Returns a non-terminal unit rule from 'g', or None if there is none."""
for rule in g.rules:
if len(rule.rhs) == 1 and isinstance(rule.rhs[0], NT):
return rule
return None | Returns a non-terminal unit rule from 'g', or None if there is none. | Below is the the instruction that describes the task:
### Input:
Returns a non-terminal unit rule from 'g', or None if there is none.
### Response:
def get_any_nt_unit_rule(g):
"""Returns a non-terminal unit rule from 'g', or None if there is none."""
for rule in g.rules:
if len(rule.rhs) == 1 and isinstance(rule.rhs[0], NT):
return rule
return None |
def area(self):
r"""The area of the current curved polygon.
This assumes, but does not check, that the current curved polygon
is valid (i.e. it is bounded by the edges).
This computes the area via Green's theorem. Using the vector field
:math:`\mathbf{F} = \left[-y, x\right]^T`, since
:math:`\partial_x(x) - \partial_y(-y) = 2` Green's theorem says
.. math::
\int_{\mathcal{P}} 2 \, d\textbf{x} =
\int_{\partial \mathcal{P}} -y \, dx + x \, dy
(where :math:`\mathcal{P}` is the current curved polygon).
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current curved polygon.
"""
edges = tuple(edge._nodes for edge in self._edges)
return _surface_helpers.compute_area(edges) | r"""The area of the current curved polygon.
This assumes, but does not check, that the current curved polygon
is valid (i.e. it is bounded by the edges).
This computes the area via Green's theorem. Using the vector field
:math:`\mathbf{F} = \left[-y, x\right]^T`, since
:math:`\partial_x(x) - \partial_y(-y) = 2` Green's theorem says
.. math::
\int_{\mathcal{P}} 2 \, d\textbf{x} =
\int_{\partial \mathcal{P}} -y \, dx + x \, dy
(where :math:`\mathcal{P}` is the current curved polygon).
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current curved polygon. | Below is the the instruction that describes the task:
### Input:
r"""The area of the current curved polygon.
This assumes, but does not check, that the current curved polygon
is valid (i.e. it is bounded by the edges).
This computes the area via Green's theorem. Using the vector field
:math:`\mathbf{F} = \left[-y, x\right]^T`, since
:math:`\partial_x(x) - \partial_y(-y) = 2` Green's theorem says
.. math::
\int_{\mathcal{P}} 2 \, d\textbf{x} =
\int_{\partial \mathcal{P}} -y \, dx + x \, dy
(where :math:`\mathcal{P}` is the current curved polygon).
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current curved polygon.
### Response:
def area(self):
r"""The area of the current curved polygon.
This assumes, but does not check, that the current curved polygon
is valid (i.e. it is bounded by the edges).
This computes the area via Green's theorem. Using the vector field
:math:`\mathbf{F} = \left[-y, x\right]^T`, since
:math:`\partial_x(x) - \partial_y(-y) = 2` Green's theorem says
.. math::
\int_{\mathcal{P}} 2 \, d\textbf{x} =
\int_{\partial \mathcal{P}} -y \, dx + x \, dy
(where :math:`\mathcal{P}` is the current curved polygon).
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current curved polygon.
"""
edges = tuple(edge._nodes for edge in self._edges)
return _surface_helpers.compute_area(edges) |
def get_single_generation(self, table, db='default'):
"""Creates a random generation value for a single table name"""
key = self.keygen.gen_table_key(table, db)
val = self.cache_backend.get(key, None, db)
#if local.get('in_test', None): print force_bytes(val).ljust(32), key
if val is None:
val = self.keygen.random_generator()
self.cache_backend.set(key, val, settings.MIDDLEWARE_SECONDS, db)
return val | Creates a random generation value for a single table name | Below is the the instruction that describes the task:
### Input:
Creates a random generation value for a single table name
### Response:
def get_single_generation(self, table, db='default'):
"""Creates a random generation value for a single table name"""
key = self.keygen.gen_table_key(table, db)
val = self.cache_backend.get(key, None, db)
#if local.get('in_test', None): print force_bytes(val).ljust(32), key
if val is None:
val = self.keygen.random_generator()
self.cache_backend.set(key, val, settings.MIDDLEWARE_SECONDS, db)
return val |
def _crop_box(self, size):
"""
Helper that calculates the crop box for the offset within the image.
:param size: The width and height of the image composition.
:type size: tuple
:returns: The bounding box of the image, given ``size``.
:rtype: tuple
"""
(left, top) = self.offset
right = left + min(size[0], self.width)
bottom = top + min(size[1], self.height)
return (left, top, right, bottom) | Helper that calculates the crop box for the offset within the image.
:param size: The width and height of the image composition.
:type size: tuple
:returns: The bounding box of the image, given ``size``.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Helper that calculates the crop box for the offset within the image.
:param size: The width and height of the image composition.
:type size: tuple
:returns: The bounding box of the image, given ``size``.
:rtype: tuple
### Response:
def _crop_box(self, size):
"""
Helper that calculates the crop box for the offset within the image.
:param size: The width and height of the image composition.
:type size: tuple
:returns: The bounding box of the image, given ``size``.
:rtype: tuple
"""
(left, top) = self.offset
right = left + min(size[0], self.width)
bottom = top + min(size[1], self.height)
return (left, top, right, bottom) |
def _extractText(self, format):
"""_extractText(self, format) -> PyObject *"""
val = _fitz.TextPage__extractText(self, format)
if format != 2:
return val
import base64, json
class b64encode(json.JSONEncoder):
def default(self,s):
if not fitz_py2 and type(s) is bytes:
return base64.b64encode(s).decode()
if type(s) is bytearray:
if fitz_py2:
return base64.b64encode(s)
else:
return base64.b64encode(s).decode()
val = json.dumps(val, separators=(",", ":"), cls=b64encode, indent=1)
return val | _extractText(self, format) -> PyObject * | Below is the the instruction that describes the task:
### Input:
_extractText(self, format) -> PyObject *
### Response:
def _extractText(self, format):
"""_extractText(self, format) -> PyObject *"""
val = _fitz.TextPage__extractText(self, format)
if format != 2:
return val
import base64, json
class b64encode(json.JSONEncoder):
def default(self,s):
if not fitz_py2 and type(s) is bytes:
return base64.b64encode(s).decode()
if type(s) is bytearray:
if fitz_py2:
return base64.b64encode(s)
else:
return base64.b64encode(s).decode()
val = json.dumps(val, separators=(",", ":"), cls=b64encode, indent=1)
return val |
def disable(self):
"""
Disable the Cloud.
:returns: A list of mist.clients' updated clouds.
"""
payload = {
"new_state": "0"
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+'/clouds/'+self.id, data=data)
req.post()
self.enabled = False
self.mist_client.update_clouds() | Disable the Cloud.
:returns: A list of mist.clients' updated clouds. | Below is the the instruction that describes the task:
### Input:
Disable the Cloud.
:returns: A list of mist.clients' updated clouds.
### Response:
def disable(self):
"""
Disable the Cloud.
:returns: A list of mist.clients' updated clouds.
"""
payload = {
"new_state": "0"
}
data = json.dumps(payload)
req = self.request(self.mist_client.uri+'/clouds/'+self.id, data=data)
req.post()
self.enabled = False
self.mist_client.update_clouds() |
def fulltext_item(self, itemkey, **kwargs):
""" Get full-text content for an item"""
query_string = "/{t}/{u}/items/{itemkey}/fulltext".format(
t=self.library_type, u=self.library_id, itemkey=itemkey
)
return self._build_query(query_string) | Get full-text content for an item | Below is the the instruction that describes the task:
### Input:
Get full-text content for an item
### Response:
def fulltext_item(self, itemkey, **kwargs):
""" Get full-text content for an item"""
query_string = "/{t}/{u}/items/{itemkey}/fulltext".format(
t=self.library_type, u=self.library_id, itemkey=itemkey
)
return self._build_query(query_string) |
def _get_gae_credentials():
"""Gets Google App Engine App Identity credentials and project ID."""
# While this library is normally bundled with app_engine, there are
# some cases where it's not available, so we tolerate ImportError.
try:
import google.auth.app_engine as app_engine
except ImportError:
return None, None
try:
credentials = app_engine.Credentials()
project_id = app_engine.get_project_id()
return credentials, project_id
except EnvironmentError:
return None, None | Gets Google App Engine App Identity credentials and project ID. | Below is the the instruction that describes the task:
### Input:
Gets Google App Engine App Identity credentials and project ID.
### Response:
def _get_gae_credentials():
"""Gets Google App Engine App Identity credentials and project ID."""
# While this library is normally bundled with app_engine, there are
# some cases where it's not available, so we tolerate ImportError.
try:
import google.auth.app_engine as app_engine
except ImportError:
return None, None
try:
credentials = app_engine.Credentials()
project_id = app_engine.get_project_id()
return credentials, project_id
except EnvironmentError:
return None, None |
def assertit(self):
"""Assert the fact within CLIPS."""
data = clips.data.DataObject(self._env)
data.value = list(self._multifield)
if lib.EnvPutFactSlot(
self._env, self._fact, ffi.NULL, data.byref) != 1:
raise CLIPSError(self._env)
super(ImpliedFact, self).assertit() | Assert the fact within CLIPS. | Below is the the instruction that describes the task:
### Input:
Assert the fact within CLIPS.
### Response:
def assertit(self):
"""Assert the fact within CLIPS."""
data = clips.data.DataObject(self._env)
data.value = list(self._multifield)
if lib.EnvPutFactSlot(
self._env, self._fact, ffi.NULL, data.byref) != 1:
raise CLIPSError(self._env)
super(ImpliedFact, self).assertit() |
def subscribe(self, subscriber: 'Subscriber',
prepend: bool = False) -> SubscriptionDisposable:
""" Subscribing the given subscriber.
:param subscriber: subscriber to add
:param prepend: For internal use - usually the subscribers will be
added at the end of a list. When prepend is True, it will be added
in front of the list. This will habe an effect in the order the
subscribers are called.
:raises SubscriptionError: if subscriber already subscribed
"""
# `subscriber in self._subscriptions` is not working because
# tuple.__contains__ is using __eq__ which is overwritten and returns
# a new publisher - not helpful here
if any(subscriber is s for s in self._subscriptions):
raise SubscriptionError('Subscriber already registered')
if prepend:
self._subscriptions.insert(0, subscriber)
else:
self._subscriptions.append(subscriber)
return SubscriptionDisposable(self, subscriber) | Subscribing the given subscriber.
:param subscriber: subscriber to add
:param prepend: For internal use - usually the subscribers will be
added at the end of a list. When prepend is True, it will be added
in front of the list. This will habe an effect in the order the
subscribers are called.
:raises SubscriptionError: if subscriber already subscribed | Below is the the instruction that describes the task:
### Input:
Subscribing the given subscriber.
:param subscriber: subscriber to add
:param prepend: For internal use - usually the subscribers will be
added at the end of a list. When prepend is True, it will be added
in front of the list. This will habe an effect in the order the
subscribers are called.
:raises SubscriptionError: if subscriber already subscribed
### Response:
def subscribe(self, subscriber: 'Subscriber',
prepend: bool = False) -> SubscriptionDisposable:
""" Subscribing the given subscriber.
:param subscriber: subscriber to add
:param prepend: For internal use - usually the subscribers will be
added at the end of a list. When prepend is True, it will be added
in front of the list. This will habe an effect in the order the
subscribers are called.
:raises SubscriptionError: if subscriber already subscribed
"""
# `subscriber in self._subscriptions` is not working because
# tuple.__contains__ is using __eq__ which is overwritten and returns
# a new publisher - not helpful here
if any(subscriber is s for s in self._subscriptions):
raise SubscriptionError('Subscriber already registered')
if prepend:
self._subscriptions.insert(0, subscriber)
else:
self._subscriptions.append(subscriber)
return SubscriptionDisposable(self, subscriber) |
def mergemessage(self, message):
""" parse OueueMessage in Model vars """
if isinstance(message, QueueMessage):
""" merge queue message vars """
for key, value in vars(message).items():
if not value is None:
setattr(self, key, value)
if (key == 'content'):
content = literal_eval(message.content)
for metakey, metavalue in content.items():
default = getattr(self, metakey, None)
if not default is None:
if isinstance(default, datetime.date):
setattr(self, metakey, safe_cast(metavalue, datetime.date, dformat=self._dateformat))
if isinstance(default, datetime.datetime):
setattr(self, metakey, safe_cast(metavalue, datetime.date, dformat=self._datetimeformat))
else:
setattr(self, metakey, metavalue) | parse OueueMessage in Model vars | Below is the the instruction that describes the task:
### Input:
parse OueueMessage in Model vars
### Response:
def mergemessage(self, message):
""" parse OueueMessage in Model vars """
if isinstance(message, QueueMessage):
""" merge queue message vars """
for key, value in vars(message).items():
if not value is None:
setattr(self, key, value)
if (key == 'content'):
content = literal_eval(message.content)
for metakey, metavalue in content.items():
default = getattr(self, metakey, None)
if not default is None:
if isinstance(default, datetime.date):
setattr(self, metakey, safe_cast(metavalue, datetime.date, dformat=self._dateformat))
if isinstance(default, datetime.datetime):
setattr(self, metakey, safe_cast(metavalue, datetime.date, dformat=self._datetimeformat))
else:
setattr(self, metakey, metavalue) |
def veas2tas(eas, h):
""" Equivalent airspeed to true airspeed """
rho = vdensity(h)
tas = eas * np.sqrt(rho0 / rho)
return tas | Equivalent airspeed to true airspeed | Below is the the instruction that describes the task:
### Input:
Equivalent airspeed to true airspeed
### Response:
def veas2tas(eas, h):
""" Equivalent airspeed to true airspeed """
rho = vdensity(h)
tas = eas * np.sqrt(rho0 / rho)
return tas |
def connect_all(self):
"""[Re-]connects all signals and slots.
If already in "connected" state, ignores the call.
"""
if self.__connected:
return # assert not self.__connected, "connect_all() already in \"connected\" state"
with self.__lock:
for signal in self.__signals:
self.__connect_signal(signal)
if self.__slot is not None:
self.__sigDelayed.connect(self.__slot, Qt.QueuedConnection)
self.__connected = True | [Re-]connects all signals and slots.
If already in "connected" state, ignores the call. | Below is the the instruction that describes the task:
### Input:
[Re-]connects all signals and slots.
If already in "connected" state, ignores the call.
### Response:
def connect_all(self):
"""[Re-]connects all signals and slots.
If already in "connected" state, ignores the call.
"""
if self.__connected:
return # assert not self.__connected, "connect_all() already in \"connected\" state"
with self.__lock:
for signal in self.__signals:
self.__connect_signal(signal)
if self.__slot is not None:
self.__sigDelayed.connect(self.__slot, Qt.QueuedConnection)
self.__connected = True |
def _WaitForStartup(self, deadline):
"""Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise.
"""
start = time.time()
sleep = 0.05
def Elapsed():
return time.time() - start
while True:
try:
response, _ = self._http.request(self._host)
if response.status == 200:
logging.info('emulator responded after %f seconds', Elapsed())
return True
except (socket.error, httplib.ResponseNotReady):
pass
if Elapsed() >= deadline:
# Out of time; give up.
return False
else:
time.sleep(sleep)
sleep *= 2 | Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise. | Below is the the instruction that describes the task:
### Input:
Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise.
### Response:
def _WaitForStartup(self, deadline):
"""Waits for the emulator to start.
Args:
deadline: deadline in seconds
Returns:
True if the emulator responds within the deadline, False otherwise.
"""
start = time.time()
sleep = 0.05
def Elapsed():
return time.time() - start
while True:
try:
response, _ = self._http.request(self._host)
if response.status == 200:
logging.info('emulator responded after %f seconds', Elapsed())
return True
except (socket.error, httplib.ResponseNotReady):
pass
if Elapsed() >= deadline:
# Out of time; give up.
return False
else:
time.sleep(sleep)
sleep *= 2 |
def _next_iter_line(self, row_num):
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed.
"""
try:
return next(self.data)
except csv.Error as e:
if self.warn_bad_lines or self.error_bad_lines:
msg = str(e)
if 'NULL byte' in msg:
msg = ('NULL byte detected. This byte '
'cannot be processed in Python\'s '
'native csv library at the moment, '
'so please pass in engine=\'c\' instead')
if self.skipfooter > 0:
reason = ('Error could possibly be due to '
'parsing errors in the skipped footer rows '
'(the skipfooter keyword is only applied '
'after Python\'s csv library has parsed '
'all rows).')
msg += '. ' + reason
self._alert_malformed(msg, row_num)
return None | Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed. | Below is the the instruction that describes the task:
### Input:
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed.
### Response:
def _next_iter_line(self, row_num):
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed.
"""
try:
return next(self.data)
except csv.Error as e:
if self.warn_bad_lines or self.error_bad_lines:
msg = str(e)
if 'NULL byte' in msg:
msg = ('NULL byte detected. This byte '
'cannot be processed in Python\'s '
'native csv library at the moment, '
'so please pass in engine=\'c\' instead')
if self.skipfooter > 0:
reason = ('Error could possibly be due to '
'parsing errors in the skipped footer rows '
'(the skipfooter keyword is only applied '
'after Python\'s csv library has parsed '
'all rows).')
msg += '. ' + reason
self._alert_malformed(msg, row_num)
return None |
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs) | Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True. | Below is the the instruction that describes the task:
### Input:
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
### Response:
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs) |
def _preprocess_params(cls, kwargs):
"""Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
"""
# kwargs.pop('csrf_token', None)
for attr, val in kwargs.items():
if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
del kwargs[attr]
continue
if val == "":
# Making an assumption that there is no good usecase
# for setting an empty string. This will help prevent
# cases where empty string is sent because of client
# not clearing form fields to null
kwargs[attr] = None
continue
if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
rel = class_mapper(cls).relationships[attr]
if rel.uselist:
if isinstance(val, list):
if all(isinstance(v, dict) for v in val):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new_all(
list_of_kwargs=val, keys=[rel_cls.primary_key_name()])
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
mapping_col = rel.collection_class().keyfunc.name
list_of_kwargs = [merge(v, {mapping_col: k}) for k, v in val.items()]
kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(
list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new(
**merge(val, {'keys': [rel_cls.primary_key_name()]}))
return kwargs | Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters | Below is the the instruction that describes the task:
### Input:
Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
### Response:
def _preprocess_params(cls, kwargs):
"""Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
"""
# kwargs.pop('csrf_token', None)
for attr, val in kwargs.items():
if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
del kwargs[attr]
continue
if val == "":
# Making an assumption that there is no good usecase
# for setting an empty string. This will help prevent
# cases where empty string is sent because of client
# not clearing form fields to null
kwargs[attr] = None
continue
if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
rel = class_mapper(cls).relationships[attr]
if rel.uselist:
if isinstance(val, list):
if all(isinstance(v, dict) for v in val):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new_all(
list_of_kwargs=val, keys=[rel_cls.primary_key_name()])
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
mapping_col = rel.collection_class().keyfunc.name
list_of_kwargs = [merge(v, {mapping_col: k}) for k, v in val.items()]
kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(
list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new(
**merge(val, {'keys': [rel_cls.primary_key_name()]}))
return kwargs |
def compile_state_action_constraints(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.transition_scope(state, action)
constraints = []
with self.graph.as_default():
with tf.name_scope('state_action_constraints'):
for p in self.rddl.domain.constraints:
fluent = self._compile_expression(p, scope)
constraints.append(fluent)
return constraints | Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`. | Below is the the instruction that describes the task:
### Input:
Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
### Response:
def compile_state_action_constraints(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.transition_scope(state, action)
constraints = []
with self.graph.as_default():
with tf.name_scope('state_action_constraints'):
for p in self.rddl.domain.constraints:
fluent = self._compile_expression(p, scope)
constraints.append(fluent)
return constraints |
def week_schedule(index, on_time=None, off_time=None, off_days=None):
""" Return boolean time series following given week schedule.
Parameters
----------
index : pandas.DatetimeIndex
Datetime index
on_time : str or datetime.time
Daily opening time. Default: '09:00'
off_time : str or datetime.time
Daily closing time. Default: '17:00'
off_days : list of str
List of weekdays. Default: ['Sunday', 'Monday']
Returns
-------
pandas.Series of bool
True when on, False otherwise for given datetime index
Examples
--------
>>> import pandas as pd
>>> from opengrid.library.utils import week_schedule
>>> index = pd.date_range('20170701', '20170710', freq='H')
>>> week_schedule(index)
"""
if on_time is None:
on_time = '9:00'
if off_time is None:
off_time = '17:00'
if off_days is None:
off_days = ['Sunday', 'Monday']
if not isinstance(on_time, datetime.time):
on_time = pd.to_datetime(on_time, format='%H:%M').time()
if not isinstance(off_time, datetime.time):
off_time = pd.to_datetime(off_time, format='%H:%M').time()
times = (index.time >= on_time) & (index.time < off_time) & (~index.weekday_name.isin(off_days))
return pd.Series(times, index=index) | Return boolean time series following given week schedule.
Parameters
----------
index : pandas.DatetimeIndex
Datetime index
on_time : str or datetime.time
Daily opening time. Default: '09:00'
off_time : str or datetime.time
Daily closing time. Default: '17:00'
off_days : list of str
List of weekdays. Default: ['Sunday', 'Monday']
Returns
-------
pandas.Series of bool
True when on, False otherwise for given datetime index
Examples
--------
>>> import pandas as pd
>>> from opengrid.library.utils import week_schedule
>>> index = pd.date_range('20170701', '20170710', freq='H')
>>> week_schedule(index) | Below is the the instruction that describes the task:
### Input:
Return boolean time series following given week schedule.
Parameters
----------
index : pandas.DatetimeIndex
Datetime index
on_time : str or datetime.time
Daily opening time. Default: '09:00'
off_time : str or datetime.time
Daily closing time. Default: '17:00'
off_days : list of str
List of weekdays. Default: ['Sunday', 'Monday']
Returns
-------
pandas.Series of bool
True when on, False otherwise for given datetime index
Examples
--------
>>> import pandas as pd
>>> from opengrid.library.utils import week_schedule
>>> index = pd.date_range('20170701', '20170710', freq='H')
>>> week_schedule(index)
### Response:
def week_schedule(index, on_time=None, off_time=None, off_days=None):
""" Return boolean time series following given week schedule.
Parameters
----------
index : pandas.DatetimeIndex
Datetime index
on_time : str or datetime.time
Daily opening time. Default: '09:00'
off_time : str or datetime.time
Daily closing time. Default: '17:00'
off_days : list of str
List of weekdays. Default: ['Sunday', 'Monday']
Returns
-------
pandas.Series of bool
True when on, False otherwise for given datetime index
Examples
--------
>>> import pandas as pd
>>> from opengrid.library.utils import week_schedule
>>> index = pd.date_range('20170701', '20170710', freq='H')
>>> week_schedule(index)
"""
if on_time is None:
on_time = '9:00'
if off_time is None:
off_time = '17:00'
if off_days is None:
off_days = ['Sunday', 'Monday']
if not isinstance(on_time, datetime.time):
on_time = pd.to_datetime(on_time, format='%H:%M').time()
if not isinstance(off_time, datetime.time):
off_time = pd.to_datetime(off_time, format='%H:%M').time()
times = (index.time >= on_time) & (index.time < off_time) & (~index.weekday_name.isin(off_days))
return pd.Series(times, index=index) |
def delete(cont, path=None, profile=None):
'''
Delete a container, or delete an object from a container.
CLI Example to delete a container::
salt myminion swift.delete mycontainer
CLI Example to delete an object from a container::
salt myminion swift.delete mycontainer remoteobject
'''
swift_conn = _auth(profile)
if path is None:
return swift_conn.delete_container(cont)
else:
return swift_conn.delete_object(cont, path) | Delete a container, or delete an object from a container.
CLI Example to delete a container::
salt myminion swift.delete mycontainer
CLI Example to delete an object from a container::
salt myminion swift.delete mycontainer remoteobject | Below is the the instruction that describes the task:
### Input:
Delete a container, or delete an object from a container.
CLI Example to delete a container::
salt myminion swift.delete mycontainer
CLI Example to delete an object from a container::
salt myminion swift.delete mycontainer remoteobject
### Response:
def delete(cont, path=None, profile=None):
'''
Delete a container, or delete an object from a container.
CLI Example to delete a container::
salt myminion swift.delete mycontainer
CLI Example to delete an object from a container::
salt myminion swift.delete mycontainer remoteobject
'''
swift_conn = _auth(profile)
if path is None:
return swift_conn.delete_container(cont)
else:
return swift_conn.delete_object(cont, path) |
def learn(self, bottomUpInput):
"""
Parameters:
--------------------------------------------
input: Current bottom-up input
retval: ?
"""
self.lrnIterationIdx = self.lrnIterationIdx + 1
self.iterationIdx = self.iterationIdx + 1
if self.verbosity >= 3:
print "\n==== Iteration: %d =====" % (self.iterationIdx)
print "Active cols:", bottomUpInput.nonzero()[0]
if self.verbosity >= 4:
print len(self.segmentUpdates), "updates"
for k,v in self.segmentUpdates.iteritems():
print 'cell:', k[0] * self.cellsPerColumn + k[1],
for vv in v:
print 'seg:', vv[1].segment,
print 'timeStamp:', vv[0],
print '/ src cells:', vv[1].activeSynapses
# Copy t into t-1
# This time also copy over learnState.
self.activeState['t-1'][:,:] = self.activeState['t'][:,:]
self.activeState['t'].fill(0)
self.predictedState['t-1'][:,:] = self.predictedState['t'][:,:]
self.predictedState['t'].fill(0)
self.learnState['t-1'][:,:] = self.learnState['t'][:,:]
self.learnState['t'][:,:] = 0
self.confidence['t-1'][:,:] = self.confidence['t'][:,:]
self.confidence['t'].fill(0.0)
# Update segment duty cycles if we are crossing a "tier"
# We determine if it's time to update the segment duty cycles. Since the
# duty cycle calculation is a moving average based on a tiered alpha, it is
# important that we update all segments on each tier boundary
if self.lrnIterationIdx in Segment.dutyCycleTiers:
for c, i in product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
# Phase 1: compute current state for each cell
# For each column (winning in the SP):
# - if the bottom up input was predicted by one of the sequence
# segments of a cell, this cell bursts, (those cells also become
# learning cells),
# - if the bottom up input was not predicted by any cell, the entire
# column bursts, AND:
# - if a cell had a sequence segment above minThreshold, it becomes
# a learning cell, else
# - we create a new sequence segment for that cell, and it also
# becomes a learning cell.
activeColumns = bottomUpInput.nonzero()[0]
numUnpredictedColumns = 0
for c in activeColumns:
# todo: cache this list when building it in iteration at t
buPredicted = False # Was this bottom up input predicted?
predictingCells = numpy.where(self.predictedState['t-1'][c] == 1)[0]
for i in predictingCells:
# Convert from Numpy integer structures, for debugging in Komodo
c,i = int(c), int(i)
buPredicted = True
self.activeState['t'][c,i] = 1
self.learnState['t'][c,i] = 1
# Turn on the active state of the whole column if the bottom up input was
# not predicted
if not buPredicted:
numUnpredictedColumns += 1
self.activeState['t'][c,:] = 1
# We didn't find a cell to learn on (since no cell was predicted), so
# just pick the best matching cell, or, failing that, the least allocated
# cell.
# sum(all synapses) >= minThreshold, "weak" activation
i,s = self.getBestMatchingCell(c,self.activeState['t-1'])
if s is not None:
s.totalActivations += 1 # activationFrequency
s.lastActiveIteration = self.iterationIdx
else:
# if best matching cell does not exist, then get least used cell
i = self.getLeastUsedCell(c)
s = None
# Update a possibly weakly matching segment
# todo: In here, should we only use learnState to check for
# a weak match?
self.learnState['t'][c,i] = 1
self.activeState['t'][c,i] = 1 # In case we are in PAM mode
# propose a list of synapse change
segUpdate = self.getSegmentActiveSynapses(c,i,s,'t-1',
newSynapses = True)
segUpdate.phase1Flag = True
self.addToSegmentUpdates(c, i, segUpdate)
# ----------------------------------------------------------------------
# Phase 2: compute predicted state for each cell
# - if a segment has enough horizontal connections firing because of
# bottomUpInput, it's set to be predicting, and we queue up the segment
# for reinforcement,
self.computePhase2(doLearn=True)
# ----------------------------------------------------------------------
# Phase 3: update synapses for currently active cells (due to bottom up input)
# that have queued up segment updates, or cells that just stopped predicting
# (those cells that stopped predicting are _negatively_ reinforced).
# Also clean up queues by removing seg updates that are too old.
self.processSegmentUpdates()
# ----------------------------------------------------------------------
# Phase 4: Apply global decay, and remove synapses and/or segments.
# Synapses are removed if their permanence value is <= 0.
# Segments are removed when they don't have synapses anymore.
# Removal of synapses can trigger removal of whole segments!
# todo: isolate the synapse/segment retraction logic so that
# it can be called in adaptSegments, in the case where we
# do global decay only episodically.
if self.globalDecay > 0.0 and ((self.iterationIdx % self.maxAge) == 0):
for c, i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
segsToDel = [] # collect and remove outside the loop
for segment in self.cells[c][i]:
age = self.iterationIdx - segment[0][self.kSegLastActiveIteration]
if age <= self.maxAge:
continue
#print "Decrementing seg age %d:" % (age), c, i, segment
synsToDel = [] # collect and remove outside the loop
for synapse in segment.syns: # skip sequenceSegment flag
synapse[2] = synapse[2] - self.globalDecay # decrease permanence
if synapse[2] <= 0:
synsToDel.append(synapse) # add to list to delete
if len(synsToDel) == len(segment.syns): # 1 for sequenceSegment flag
segsToDel.append(segment) # will remove the whole segment
elif len(synsToDel) > 0:
for syn in synsToDel: # remove some synapses on segment
segment.syns.remove(syn)
for seg in segsToDel: # remove some segments of this cell
self.cleanUpdatesList(c,i,seg)
self.cells[c][i].remove(seg)
# Update the prediction score stats
# Learning always includes inference
if self.collectStats:
self._updateStatsInferEnd(self._internalStats, activeColumns,
self.predictedState['t-1'],
self.confidence['t-1'])
# Finally return learning output
output = self.computeOutput()
# Print diagnostic information based on the current verbosity level
self.printComputeEnd(output, learn=True)
self.resetCalled = False
return self.computeOutput() | Parameters:
--------------------------------------------
input: Current bottom-up input
retval: ? | Below is the the instruction that describes the task:
### Input:
Parameters:
--------------------------------------------
input: Current bottom-up input
retval: ?
### Response:
def learn(self, bottomUpInput):
"""
Parameters:
--------------------------------------------
input: Current bottom-up input
retval: ?
"""
self.lrnIterationIdx = self.lrnIterationIdx + 1
self.iterationIdx = self.iterationIdx + 1
if self.verbosity >= 3:
print "\n==== Iteration: %d =====" % (self.iterationIdx)
print "Active cols:", bottomUpInput.nonzero()[0]
if self.verbosity >= 4:
print len(self.segmentUpdates), "updates"
for k,v in self.segmentUpdates.iteritems():
print 'cell:', k[0] * self.cellsPerColumn + k[1],
for vv in v:
print 'seg:', vv[1].segment,
print 'timeStamp:', vv[0],
print '/ src cells:', vv[1].activeSynapses
# Copy t into t-1
# This time also copy over learnState.
self.activeState['t-1'][:,:] = self.activeState['t'][:,:]
self.activeState['t'].fill(0)
self.predictedState['t-1'][:,:] = self.predictedState['t'][:,:]
self.predictedState['t'].fill(0)
self.learnState['t-1'][:,:] = self.learnState['t'][:,:]
self.learnState['t'][:,:] = 0
self.confidence['t-1'][:,:] = self.confidence['t'][:,:]
self.confidence['t'].fill(0.0)
# Update segment duty cycles if we are crossing a "tier"
# We determine if it's time to update the segment duty cycles. Since the
# duty cycle calculation is a moving average based on a tiered alpha, it is
# important that we update all segments on each tier boundary
if self.lrnIterationIdx in Segment.dutyCycleTiers:
for c, i in product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
# Phase 1: compute current state for each cell
# For each column (winning in the SP):
# - if the bottom up input was predicted by one of the sequence
# segments of a cell, this cell bursts, (those cells also become
# learning cells),
# - if the bottom up input was not predicted by any cell, the entire
# column bursts, AND:
# - if a cell had a sequence segment above minThreshold, it becomes
# a learning cell, else
# - we create a new sequence segment for that cell, and it also
# becomes a learning cell.
activeColumns = bottomUpInput.nonzero()[0]
numUnpredictedColumns = 0
for c in activeColumns:
# todo: cache this list when building it in iteration at t
buPredicted = False # Was this bottom up input predicted?
predictingCells = numpy.where(self.predictedState['t-1'][c] == 1)[0]
for i in predictingCells:
# Convert from Numpy integer structures, for debugging in Komodo
c,i = int(c), int(i)
buPredicted = True
self.activeState['t'][c,i] = 1
self.learnState['t'][c,i] = 1
# Turn on the active state of the whole column if the bottom up input was
# not predicted
if not buPredicted:
numUnpredictedColumns += 1
self.activeState['t'][c,:] = 1
# We didn't find a cell to learn on (since no cell was predicted), so
# just pick the best matching cell, or, failing that, the least allocated
# cell.
# sum(all synapses) >= minThreshold, "weak" activation
i,s = self.getBestMatchingCell(c,self.activeState['t-1'])
if s is not None:
s.totalActivations += 1 # activationFrequency
s.lastActiveIteration = self.iterationIdx
else:
# if best matching cell does not exist, then get least used cell
i = self.getLeastUsedCell(c)
s = None
# Update a possibly weakly matching segment
# todo: In here, should we only use learnState to check for
# a weak match?
self.learnState['t'][c,i] = 1
self.activeState['t'][c,i] = 1 # In case we are in PAM mode
# propose a list of synapse change
segUpdate = self.getSegmentActiveSynapses(c,i,s,'t-1',
newSynapses = True)
segUpdate.phase1Flag = True
self.addToSegmentUpdates(c, i, segUpdate)
# ----------------------------------------------------------------------
# Phase 2: compute predicted state for each cell
# - if a segment has enough horizontal connections firing because of
# bottomUpInput, it's set to be predicting, and we queue up the segment
# for reinforcement,
self.computePhase2(doLearn=True)
# ----------------------------------------------------------------------
# Phase 3: update synapses for currently active cells (due to bottom up input)
# that have queued up segment updates, or cells that just stopped predicting
# (those cells that stopped predicting are _negatively_ reinforced).
# Also clean up queues by removing seg updates that are too old.
self.processSegmentUpdates()
# ----------------------------------------------------------------------
# Phase 4: Apply global decay, and remove synapses and/or segments.
# Synapses are removed if their permanence value is <= 0.
# Segments are removed when they don't have synapses anymore.
# Removal of synapses can trigger removal of whole segments!
# todo: isolate the synapse/segment retraction logic so that
# it can be called in adaptSegments, in the case where we
# do global decay only episodically.
if self.globalDecay > 0.0 and ((self.iterationIdx % self.maxAge) == 0):
for c, i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
segsToDel = [] # collect and remove outside the loop
for segment in self.cells[c][i]:
age = self.iterationIdx - segment[0][self.kSegLastActiveIteration]
if age <= self.maxAge:
continue
#print "Decrementing seg age %d:" % (age), c, i, segment
synsToDel = [] # collect and remove outside the loop
for synapse in segment.syns: # skip sequenceSegment flag
synapse[2] = synapse[2] - self.globalDecay # decrease permanence
if synapse[2] <= 0:
synsToDel.append(synapse) # add to list to delete
if len(synsToDel) == len(segment.syns): # 1 for sequenceSegment flag
segsToDel.append(segment) # will remove the whole segment
elif len(synsToDel) > 0:
for syn in synsToDel: # remove some synapses on segment
segment.syns.remove(syn)
for seg in segsToDel: # remove some segments of this cell
self.cleanUpdatesList(c,i,seg)
self.cells[c][i].remove(seg)
# Update the prediction score stats
# Learning always includes inference
if self.collectStats:
self._updateStatsInferEnd(self._internalStats, activeColumns,
self.predictedState['t-1'],
self.confidence['t-1'])
# Finally return learning output
output = self.computeOutput()
# Print diagnostic information based on the current verbosity level
self.printComputeEnd(output, learn=True)
self.resetCalled = False
return self.computeOutput() |
def exist_edge(self, graph, orig, dest, idx, branch, turn, tick, extant):
"""Declare whether or not this edge exists."""
if (branch, turn, tick) in self._btts:
raise TimeError
self._btts.add((branch, turn, tick))
graph, orig, dest = map(self.pack, (graph, orig, dest))
self._edges2set.append((graph, orig, dest, idx, branch, turn, tick, extant)) | Declare whether or not this edge exists. | Below is the the instruction that describes the task:
### Input:
Declare whether or not this edge exists.
### Response:
def exist_edge(self, graph, orig, dest, idx, branch, turn, tick, extant):
"""Declare whether or not this edge exists."""
if (branch, turn, tick) in self._btts:
raise TimeError
self._btts.add((branch, turn, tick))
graph, orig, dest = map(self.pack, (graph, orig, dest))
self._edges2set.append((graph, orig, dest, idx, branch, turn, tick, extant)) |
def _url_for(endpoint: str, **values) -> Union[str, None]:
"""
The same as flask's url_for, except this also supports building external
urls for hosts that are different from app.config.SERVER_NAME. One case
where this is especially useful is for single page apps, where the frontend
is not hosted by the same server as the backend, but the backend still needs
to generate urls to frontend routes
:param endpoint: the name of the endpoint
:param values: the variable arguments of the URL rule
:return: a url path, or None
"""
_external_host = values.pop('_external_host', None)
is_external = bool(_external_host or values.get('_external'))
external_host = _external_host or current_app.config.get('EXTERNAL_SERVER_NAME')
if not is_external or not external_host:
return flask_url_for(endpoint, **values)
if '://' not in external_host:
external_host = f'http://{external_host}'
values.pop('_external')
return external_host.rstrip('/') + flask_url_for(endpoint, **values) | The same as flask's url_for, except this also supports building external
urls for hosts that are different from app.config.SERVER_NAME. One case
where this is especially useful is for single page apps, where the frontend
is not hosted by the same server as the backend, but the backend still needs
to generate urls to frontend routes
:param endpoint: the name of the endpoint
:param values: the variable arguments of the URL rule
:return: a url path, or None | Below is the the instruction that describes the task:
### Input:
The same as flask's url_for, except this also supports building external
urls for hosts that are different from app.config.SERVER_NAME. One case
where this is especially useful is for single page apps, where the frontend
is not hosted by the same server as the backend, but the backend still needs
to generate urls to frontend routes
:param endpoint: the name of the endpoint
:param values: the variable arguments of the URL rule
:return: a url path, or None
### Response:
def _url_for(endpoint: str, **values) -> Union[str, None]:
"""
The same as flask's url_for, except this also supports building external
urls for hosts that are different from app.config.SERVER_NAME. One case
where this is especially useful is for single page apps, where the frontend
is not hosted by the same server as the backend, but the backend still needs
to generate urls to frontend routes
:param endpoint: the name of the endpoint
:param values: the variable arguments of the URL rule
:return: a url path, or None
"""
_external_host = values.pop('_external_host', None)
is_external = bool(_external_host or values.get('_external'))
external_host = _external_host or current_app.config.get('EXTERNAL_SERVER_NAME')
if not is_external or not external_host:
return flask_url_for(endpoint, **values)
if '://' not in external_host:
external_host = f'http://{external_host}'
values.pop('_external')
return external_host.rstrip('/') + flask_url_for(endpoint, **values) |
def sleep(self, seconds):
"""Services all futures while waiting
Args:
seconds (float): Time to wait
"""
until = time.time() + seconds
try:
while True:
self._service_futures([], until)
except TimeoutError:
return | Services all futures while waiting
Args:
seconds (float): Time to wait | Below is the the instruction that describes the task:
### Input:
Services all futures while waiting
Args:
seconds (float): Time to wait
### Response:
def sleep(self, seconds):
"""Services all futures while waiting
Args:
seconds (float): Time to wait
"""
until = time.time() + seconds
try:
while True:
self._service_futures([], until)
except TimeoutError:
return |
def read_struct_field(self, struct_name, field_name, x, y, p=0):
"""Read the value out of a struct maintained by SARK.
This method is particularly useful for reading fields from the ``sv``
struct which, for example, holds information about system status. See
``sark.h`` for details.
Parameters
----------
struct_name : string
Name of the struct to read from, e.g., `"sv"`
field_name : string
Name of the field to read, e.g., `"eth_addr"`
Returns
-------
value
The value returned is unpacked given the struct specification.
Currently arrays are returned as tuples, e.g.::
# Returns a 20-tuple.
cn.read_struct_field("sv", "status_map")
# Fails
cn.read_struct_field("sv", "status_map[1]")
"""
# Look up the struct and field
field, address, pack_chars = \
self._get_struct_field_and_address(struct_name, field_name)
length = struct.calcsize(pack_chars)
# Perform the read
data = self.read(address, length, x, y, p)
# Unpack the data
unpacked = struct.unpack(pack_chars, data)
if field.length == 1:
return unpacked[0]
else:
return unpacked | Read the value out of a struct maintained by SARK.
This method is particularly useful for reading fields from the ``sv``
struct which, for example, holds information about system status. See
``sark.h`` for details.
Parameters
----------
struct_name : string
Name of the struct to read from, e.g., `"sv"`
field_name : string
Name of the field to read, e.g., `"eth_addr"`
Returns
-------
value
The value returned is unpacked given the struct specification.
Currently arrays are returned as tuples, e.g.::
# Returns a 20-tuple.
cn.read_struct_field("sv", "status_map")
# Fails
cn.read_struct_field("sv", "status_map[1]") | Below is the the instruction that describes the task:
### Input:
Read the value out of a struct maintained by SARK.
This method is particularly useful for reading fields from the ``sv``
struct which, for example, holds information about system status. See
``sark.h`` for details.
Parameters
----------
struct_name : string
Name of the struct to read from, e.g., `"sv"`
field_name : string
Name of the field to read, e.g., `"eth_addr"`
Returns
-------
value
The value returned is unpacked given the struct specification.
Currently arrays are returned as tuples, e.g.::
# Returns a 20-tuple.
cn.read_struct_field("sv", "status_map")
# Fails
cn.read_struct_field("sv", "status_map[1]")
### Response:
def read_struct_field(self, struct_name, field_name, x, y, p=0):
"""Read the value out of a struct maintained by SARK.
This method is particularly useful for reading fields from the ``sv``
struct which, for example, holds information about system status. See
``sark.h`` for details.
Parameters
----------
struct_name : string
Name of the struct to read from, e.g., `"sv"`
field_name : string
Name of the field to read, e.g., `"eth_addr"`
Returns
-------
value
The value returned is unpacked given the struct specification.
Currently arrays are returned as tuples, e.g.::
# Returns a 20-tuple.
cn.read_struct_field("sv", "status_map")
# Fails
cn.read_struct_field("sv", "status_map[1]")
"""
# Look up the struct and field
field, address, pack_chars = \
self._get_struct_field_and_address(struct_name, field_name)
length = struct.calcsize(pack_chars)
# Perform the read
data = self.read(address, length, x, y, p)
# Unpack the data
unpacked = struct.unpack(pack_chars, data)
if field.length == 1:
return unpacked[0]
else:
return unpacked |
def validate(self, data, schema):
'''Perform a data validation against a given schema.
:param data: an object to validate
:param schema: a Voluptous schema to validate against
'''
try:
return schema(data)
except MultipleInvalid as ie:
errors = []
for error in ie.errors:
if error.path:
field = '.'.join(str(p) for p in error.path)
path = error.path
value = data
while path:
attr = path.pop(0)
try:
if isinstance(value, (list, tuple)):
attr = int(attr)
value = value[attr]
except Exception:
value = None
txt = safe_unicode(error).replace('for dictionary value', '')
txt = txt.strip()
if isinstance(error, RequiredFieldInvalid):
msg = '[{0}] {1}'
else:
msg = '[{0}] {1}: {2}'
try:
msg = msg.format(field, txt, str(value))
except Exception:
msg = '[{0}] {1}'.format(field, txt)
else:
msg = str(error)
errors.append(msg)
msg = '\n- '.join(['Validation error:'] + errors)
raise HarvestValidationError(msg) | Perform a data validation against a given schema.
:param data: an object to validate
:param schema: a Voluptous schema to validate against | Below is the the instruction that describes the task:
### Input:
Perform a data validation against a given schema.
:param data: an object to validate
:param schema: a Voluptous schema to validate against
### Response:
def validate(self, data, schema):
'''Perform a data validation against a given schema.
:param data: an object to validate
:param schema: a Voluptous schema to validate against
'''
try:
return schema(data)
except MultipleInvalid as ie:
errors = []
for error in ie.errors:
if error.path:
field = '.'.join(str(p) for p in error.path)
path = error.path
value = data
while path:
attr = path.pop(0)
try:
if isinstance(value, (list, tuple)):
attr = int(attr)
value = value[attr]
except Exception:
value = None
txt = safe_unicode(error).replace('for dictionary value', '')
txt = txt.strip()
if isinstance(error, RequiredFieldInvalid):
msg = '[{0}] {1}'
else:
msg = '[{0}] {1}: {2}'
try:
msg = msg.format(field, txt, str(value))
except Exception:
msg = '[{0}] {1}'.format(field, txt)
else:
msg = str(error)
errors.append(msg)
msg = '\n- '.join(['Validation error:'] + errors)
raise HarvestValidationError(msg) |
def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False,
selector=None, defaultValue=None):
"""Return a condensed array of data selected from :class:`Fi` instances
from ``self.container`` for fast and convenient data processing.
:param attr: list of :class:`Fi` item attributes that should be added
to the returned array. The attributes "id" and "specfile" are always
included, in combination they serve as a unique id.
:param defaultValue: if an item is missing an attribute, the
"defaultValue" is added to the array instead.
:param specfiles: filenames of ms-run files - if specified return only
items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
according to the :class:`Fi` attribute specified by "sort", if the
attribute is not present the item is skipped.
:param reverse: bool, set True to reverse sort order
:param selector: a function which is called with each `Fi` item and has
to return True (include item) or False (discard item).
Default function is: ``lambda si: True``. By default only items with
``Fi.isValid == True`` are returned.
:returns: {'attribute1': numpy.array(),
'attribute2': numpy.array(),
...
}
"""
selector = (lambda fi: fi.isValid) if selector is None else selector
attr = attr if attr is not None else []
attr = set(['id', 'specfile'] + aux.toList(attr))
items = self.getItems(specfiles, sort, reverse, selector)
return _getArrays(items, attr, defaultValue) | Return a condensed array of data selected from :class:`Fi` instances
from ``self.container`` for fast and convenient data processing.
:param attr: list of :class:`Fi` item attributes that should be added
to the returned array. The attributes "id" and "specfile" are always
included, in combination they serve as a unique id.
:param defaultValue: if an item is missing an attribute, the
"defaultValue" is added to the array instead.
:param specfiles: filenames of ms-run files - if specified return only
items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
according to the :class:`Fi` attribute specified by "sort", if the
attribute is not present the item is skipped.
:param reverse: bool, set True to reverse sort order
:param selector: a function which is called with each `Fi` item and has
to return True (include item) or False (discard item).
Default function is: ``lambda si: True``. By default only items with
``Fi.isValid == True`` are returned.
:returns: {'attribute1': numpy.array(),
'attribute2': numpy.array(),
...
} | Below is the the instruction that describes the task:
### Input:
Return a condensed array of data selected from :class:`Fi` instances
from ``self.container`` for fast and convenient data processing.
:param attr: list of :class:`Fi` item attributes that should be added
to the returned array. The attributes "id" and "specfile" are always
included, in combination they serve as a unique id.
:param defaultValue: if an item is missing an attribute, the
"defaultValue" is added to the array instead.
:param specfiles: filenames of ms-run files - if specified return only
items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
according to the :class:`Fi` attribute specified by "sort", if the
attribute is not present the item is skipped.
:param reverse: bool, set True to reverse sort order
:param selector: a function which is called with each `Fi` item and has
to return True (include item) or False (discard item).
Default function is: ``lambda si: True``. By default only items with
``Fi.isValid == True`` are returned.
:returns: {'attribute1': numpy.array(),
'attribute2': numpy.array(),
...
}
### Response:
def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False,
selector=None, defaultValue=None):
"""Return a condensed array of data selected from :class:`Fi` instances
from ``self.container`` for fast and convenient data processing.
:param attr: list of :class:`Fi` item attributes that should be added
to the returned array. The attributes "id" and "specfile" are always
included, in combination they serve as a unique id.
:param defaultValue: if an item is missing an attribute, the
"defaultValue" is added to the array instead.
:param specfiles: filenames of ms-run files - if specified return only
items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
according to the :class:`Fi` attribute specified by "sort", if the
attribute is not present the item is skipped.
:param reverse: bool, set True to reverse sort order
:param selector: a function which is called with each `Fi` item and has
to return True (include item) or False (discard item).
Default function is: ``lambda si: True``. By default only items with
``Fi.isValid == True`` are returned.
:returns: {'attribute1': numpy.array(),
'attribute2': numpy.array(),
...
}
"""
selector = (lambda fi: fi.isValid) if selector is None else selector
attr = attr if attr is not None else []
attr = set(['id', 'specfile'] + aux.toList(attr))
items = self.getItems(specfiles, sort, reverse, selector)
return _getArrays(items, attr, defaultValue) |
def delete_product_by_id(cls, product_id, **kwargs):
"""Delete Product
Delete an instance of Product by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_product_by_id(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_product_by_id_with_http_info(product_id, **kwargs)
else:
(data) = cls._delete_product_by_id_with_http_info(product_id, **kwargs)
return data | Delete Product
Delete an instance of Product by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_product_by_id(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Delete Product
Delete an instance of Product by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_product_by_id(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_product_by_id(cls, product_id, **kwargs):
"""Delete Product
Delete an instance of Product by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_product_by_id(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_product_by_id_with_http_info(product_id, **kwargs)
else:
(data) = cls._delete_product_by_id_with_http_info(product_id, **kwargs)
return data |
def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidSwipeRefreshLayout, self).init_widget()
d = self.declaration
w = self.widget
if not d.enabled:
self.set_enabled(d.enabled)
if d.indicator_background_color:
self.set_indicator_background_color(d.indicator_background_color)
if d.indicator_color:
self.set_indicator_color(d.indicator_color)
if d.trigger_distance:
self.set_trigger_distance(d.trigger_distance)
w.onRefresh.connect(self.on_refresh)
w.setOnRefreshListener(w.getId()) | Initialize the underlying widget. | Below is the the instruction that describes the task:
### Input:
Initialize the underlying widget.
### Response:
def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidSwipeRefreshLayout, self).init_widget()
d = self.declaration
w = self.widget
if not d.enabled:
self.set_enabled(d.enabled)
if d.indicator_background_color:
self.set_indicator_background_color(d.indicator_background_color)
if d.indicator_color:
self.set_indicator_color(d.indicator_color)
if d.trigger_distance:
self.set_trigger_distance(d.trigger_distance)
w.onRefresh.connect(self.on_refresh)
w.setOnRefreshListener(w.getId()) |
def gfposc(target, inframe, abcorr, obsrvr, crdsys, coord, relate, refval,
           adjust, step, nintvals, cnfine, result=None):
    """
    Determine time intervals for which a coordinate of an
    observer-target position vector satisfies a numerical constraint.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfposc_c.html
    :param target: Name of the target body.
    :type target: str
    :param inframe: Name of the reference frame for coordinate calculations.
    :type inframe: str
    :param abcorr: Aberration correction flag.
    :type abcorr: str
    :param obsrvr: Name of the observing body.
    :type obsrvr: str
    :param crdsys: Name of the coordinate system containing COORD
    :type crdsys: str
    :param coord: Name of the coordinate of interest
    :type coord: str
    :param relate: Relational operator.
    :type relate: str
    :param refval: Reference value.
    :type refval: float
    :param adjust: Adjustment value for absolute extrema searches.
    :type adjust: float
    :param step: Step size used for locating extrema and roots.
    :type step: float
    :param nintvals: Workspace window interval count.
    :type nintvals: int
    :param cnfine: SPICE window to which the search is restricted.
    :type cnfine: spiceypy.utils.support_types.SpiceCell
    :param result: Optional SPICE window containing results.
    :type result: spiceypy.utils.support_types.SpiceCell
    :return: SPICE window of intervals satisfying the constraint (the
        ``result`` argument itself when one was supplied).
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(cnfine, stypes.SpiceCell)
    assert cnfine.is_double()
    if result is None:
        # Allocate a default 2000-element double cell for the output window.
        result = stypes.SPICEDOUBLE_CELL(2000)
    else:
        assert isinstance(result, stypes.SpiceCell)
        assert result.is_double()
    # Convert every Python argument to the ctypes form the CSPICE C API expects.
    target = stypes.stringToCharP(target)
    inframe = stypes.stringToCharP(inframe)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    crdsys = stypes.stringToCharP(crdsys)
    coord = stypes.stringToCharP(coord)
    relate = stypes.stringToCharP(relate)
    refval = ctypes.c_double(refval)
    adjust = ctypes.c_double(adjust)
    step = ctypes.c_double(step)
    nintvals = ctypes.c_int(nintvals)
    # gfposc_c fills ``result`` in place.
    libspice.gfposc_c(target, inframe, abcorr, obsrvr, crdsys, coord,
                      relate, refval, adjust, step, nintvals,
                      ctypes.byref(cnfine), ctypes.byref(result))
    return result | Determine time intervals for which a coordinate of an
observer-target position vector satisfies a numerical constraint.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfposc_c.html
:param target: Name of the target body.
:type target: str
:param inframe: Name of the reference frame for coordinate calculations.
:type inframe: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param obsrvr: Name of the observing body.
:type obsrvr: str
:param crdsys: Name of the coordinate system containing COORD
:type crdsys: str
:param coord: Name of the coordinate of interest
:type coord: str
:param relate: Relational operator.
:type relate: str
:param refval: Reference value.
:type refval: float
:param adjust: Adjustment value for absolute extrema searches.
:type adjust: float
:param step: Step size used for locating extrema and roots.
:type step: float
:param nintvals: Workspace window interval count.
:type nintvals: int
:param cnfine: SPICE window to which the search is restricted.
:type cnfine: spiceypy.utils.support_types.SpiceCell
:param result: Optional SPICE window containing results.
:type result: spiceypy.utils.support_types.SpiceCell | Below is the the instruction that describes the task:
### Input:
Determine time intervals for which a coordinate of an
observer-target position vector satisfies a numerical constraint.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfposc_c.html
:param target: Name of the target body.
:type target: str
:param inframe: Name of the reference frame for coordinate calculations.
:type inframe: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param obsrvr: Name of the observing body.
:type obsrvr: str
:param crdsys: Name of the coordinate system containing COORD
:type crdsys: str
:param coord: Name of the coordinate of interest
:type coord: str
:param relate: Relational operator.
:type relate: str
:param refval: Reference value.
:type refval: float
:param adjust: Adjustment value for absolute extrema searches.
:type adjust: float
:param step: Step size used for locating extrema and roots.
:type step: float
:param nintvals: Workspace window interval count.
:type nintvals: int
:param cnfine: SPICE window to which the search is restricted.
:type cnfine: spiceypy.utils.support_types.SpiceCell
:param result: Optional SPICE window containing results.
:type result: spiceypy.utils.support_types.SpiceCell
### Response:
def gfposc(target, inframe, abcorr, obsrvr, crdsys, coord, relate, refval,
adjust, step, nintvals, cnfine, result=None):
"""
Determine time intervals for which a coordinate of an
observer-target position vector satisfies a numerical constraint.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfposc_c.html
:param target: Name of the target body.
:type target: str
:param inframe: Name of the reference frame for coordinate calculations.
:type inframe: str
:param abcorr: Aberration correction flag.
:type abcorr: str
:param obsrvr: Name of the observing body.
:type obsrvr: str
:param crdsys: Name of the coordinate system containing COORD
:type crdsys: str
:param coord: Name of the coordinate of interest
:type coord: str
:param relate: Relational operator.
:type relate: str
:param refval: Reference value.
:type refval: float
:param adjust: Adjustment value for absolute extrema searches.
:type adjust: float
:param step: Step size used for locating extrema and roots.
:type step: float
:param nintvals: Workspace window interval count.
:type nintvals: int
:param cnfine: SPICE window to which the search is restricted.
:type cnfine: spiceypy.utils.support_types.SpiceCell
:param result: Optional SPICE window containing results.
:type result: spiceypy.utils.support_types.SpiceCell
"""
assert isinstance(cnfine, stypes.SpiceCell)
assert cnfine.is_double()
if result is None:
result = stypes.SPICEDOUBLE_CELL(2000)
else:
assert isinstance(result, stypes.SpiceCell)
assert result.is_double()
target = stypes.stringToCharP(target)
inframe = stypes.stringToCharP(inframe)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
crdsys = stypes.stringToCharP(crdsys)
coord = stypes.stringToCharP(coord)
relate = stypes.stringToCharP(relate)
refval = ctypes.c_double(refval)
adjust = ctypes.c_double(adjust)
step = ctypes.c_double(step)
nintvals = ctypes.c_int(nintvals)
libspice.gfposc_c(target, inframe, abcorr, obsrvr, crdsys, coord,
relate, refval, adjust, step, nintvals,
ctypes.byref(cnfine), ctypes.byref(result))
return result |
def normalLines(actor, ratio=1, c=(0.6, 0.6, 0.6), alpha=0.8):
    """
    Build an ``vtkActor`` made of the normals at vertices shown as lines.

    :param actor: input actor; its vertex normals are (re)computed first.
    :param int ratio: keep only one point out of every ``ratio`` points.
    :param c: color of the lines.
    :param float alpha: opacity of the lines.
    """
    # Subsample the points so only 1/ratio of the normals are drawn.
    maskPts = vtk.vtkMaskPoints()
    maskPts.SetOnRatio(ratio)
    maskPts.RandomModeOff()
    actor = actor.computeNormals()
    src = actor.polydata()
    maskPts.SetInputData(src)
    # A short line segment is used as the glyph template for each normal.
    arrow = vtk.vtkLineSource()
    arrow.SetPoint1(0, 0, 0)
    arrow.SetPoint2(0.75, 0, 0)
    glyph = vtk.vtkGlyph3D()
    glyph.SetSourceConnection(arrow.GetOutputPort())
    glyph.SetInputConnection(maskPts.GetOutputPort())
    # Orient each line along the point's normal vector.
    glyph.SetVectorModeToUseNormal()
    b = src.GetBounds()
    # Scale lines to 1/20 of the largest bounding-box dimension.
    sc = max([b[1] - b[0], b[3] - b[2], b[5] - b[4]]) / 20.0
    glyph.SetScaleFactor(sc)
    glyph.OrientOn()
    glyph.Update()
    glyphActor = Actor(glyph.GetOutput(), c=vc.getColor(c), alpha=alpha)
    glyphActor.mapper.SetScalarModeToUsePointFieldData()
    glyphActor.PickableOff()
    # Mirror the input actor's rendering properties on the glyph actor.
    prop = vtk.vtkProperty()
    prop.DeepCopy(actor.GetProperty())
    glyphActor.SetProperty(prop)
    return glyphActor | Build an ``vtkActor`` made of the normals at vertices shown as lines.
### Input:
Build an ``vtkActor`` made of the normals at vertices shown as lines.
### Response:
def normalLines(actor, ratio=1, c=(0.6, 0.6, 0.6), alpha=0.8):
"""
Build an ``vtkActor`` made of the normals at vertices shown as lines.
"""
maskPts = vtk.vtkMaskPoints()
maskPts.SetOnRatio(ratio)
maskPts.RandomModeOff()
actor = actor.computeNormals()
src = actor.polydata()
maskPts.SetInputData(src)
arrow = vtk.vtkLineSource()
arrow.SetPoint1(0, 0, 0)
arrow.SetPoint2(0.75, 0, 0)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInputConnection(maskPts.GetOutputPort())
glyph.SetVectorModeToUseNormal()
b = src.GetBounds()
sc = max([b[1] - b[0], b[3] - b[2], b[5] - b[4]]) / 20.0
glyph.SetScaleFactor(sc)
glyph.OrientOn()
glyph.Update()
glyphActor = Actor(glyph.GetOutput(), c=vc.getColor(c), alpha=alpha)
glyphActor.mapper.SetScalarModeToUsePointFieldData()
glyphActor.PickableOff()
prop = vtk.vtkProperty()
prop.DeepCopy(actor.GetProperty())
glyphActor.SetProperty(prop)
return glyphActor |
def execute(self, code, silent=False,
            user_variables=None, user_expressions=None, allow_stdin=None):
    """Execute code in the kernel.
    Parameters
    ----------
    code : str
        A string of Python code.
    silent : bool, optional (default False)
        If set, the kernel will execute the code as quietly possible.
    user_variables : list, optional
        A list of variable names to pull from the user's namespace. They
        will come back as a dict with these names as keys and their
        :func:`repr` as values.
    user_expressions : dict, optional
        A dict mapping names to expressions to be evaluated in the user's
        namespace. They will come back as a dict with these names as keys
        and their :func:`repr` as values.
    allow_stdin : bool, optional
        Flag for whether the kernel is allowed to prompt the user for
        input on stdin. Defaults to ``self.allow_stdin`` when None.
    Returns
    -------
    The msg_id of the message sent.
    """
    if user_variables is None:
        user_variables = []
    if user_expressions is None:
        user_expressions = {}
    if allow_stdin is None:
        allow_stdin = self.allow_stdin
    # Don't waste network traffic if inputs are invalid
    # (``basestring`` only exists on Python 2; this module targets Py2).
    if not isinstance(code, basestring):
        raise ValueError('code %r must be a string' % code)
    validate_string_list(user_variables)
    validate_string_dict(user_expressions)
    # Create class for content/msg creation. Related to, but possibly
    # not in Session.
    content = dict(code=code, silent=silent,
                   user_variables=user_variables,
                   user_expressions=user_expressions,
                   allow_stdin=allow_stdin,
                   )
    msg = self.session.msg('execute_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id'] | Execute code in the kernel.
Parameters
----------
code : str
A string of Python code.
silent : bool, optional (default False)
If set, the kernel will execute the code as quietly possible.
user_variables : list, optional
A list of variable names to pull from the user's namespace. They
will come back as a dict with these names as keys and their
:func:`repr` as values.
user_expressions : dict, optional
A dict with string keys and to pull from the user's
namespace. They will come back as a dict with these names as keys
and their :func:`repr` as values.
allow_stdin : bool, optional
Flag for
A dict with string keys and to pull from the user's
namespace. They will come back as a dict with these names as keys
and their :func:`repr` as values.
Returns
-------
The msg_id of the message sent. | Below is the the instruction that describes the task:
### Input:
Execute code in the kernel.
Parameters
----------
code : str
A string of Python code.
silent : bool, optional (default False)
If set, the kernel will execute the code as quietly possible.
user_variables : list, optional
A list of variable names to pull from the user's namespace. They
will come back as a dict with these names as keys and their
:func:`repr` as values.
user_expressions : dict, optional
A dict with string keys and to pull from the user's
namespace. They will come back as a dict with these names as keys
and their :func:`repr` as values.
allow_stdin : bool, optional
Flag for
A dict with string keys and to pull from the user's
namespace. They will come back as a dict with these names as keys
and their :func:`repr` as values.
Returns
-------
The msg_id of the message sent.
### Response:
def execute(self, code, silent=False,
user_variables=None, user_expressions=None, allow_stdin=None):
"""Execute code in the kernel.
Parameters
----------
code : str
A string of Python code.
silent : bool, optional (default False)
If set, the kernel will execute the code as quietly possible.
user_variables : list, optional
A list of variable names to pull from the user's namespace. They
will come back as a dict with these names as keys and their
:func:`repr` as values.
user_expressions : dict, optional
A dict with string keys and to pull from the user's
namespace. They will come back as a dict with these names as keys
and their :func:`repr` as values.
allow_stdin : bool, optional
Flag for
A dict with string keys and to pull from the user's
namespace. They will come back as a dict with these names as keys
and their :func:`repr` as values.
Returns
-------
The msg_id of the message sent.
"""
if user_variables is None:
user_variables = []
if user_expressions is None:
user_expressions = {}
if allow_stdin is None:
allow_stdin = self.allow_stdin
# Don't waste network traffic if inputs are invalid
if not isinstance(code, basestring):
raise ValueError('code %r must be a string' % code)
validate_string_list(user_variables)
validate_string_dict(user_expressions)
# Create class for content/msg creation. Related to, but possibly
# not in Session.
content = dict(code=code, silent=silent,
user_variables=user_variables,
user_expressions=user_expressions,
allow_stdin=allow_stdin,
)
msg = self.session.msg('execute_request', content)
self._queue_send(msg)
return msg['header']['msg_id'] |
def _load(self, url):
"""
Load from remote, but check local file content to identify duplicate content. If local file is found with
same hash then it is used with metadata from remote object to avoid fetching full content.
:param url:
:return:
"""
self._logger.debug('Loading url %s into resource cache' % url)
retriever = get_resource_retriever(url)
content_path = os.path.join(self.path, self._hash_path(url))
try:
if url in self.metadata:
headers = retriever.fetch(content_path, last_etag=self.metadata[url]['etag'], progress_bar=self.progress_bars)
if headers is None:
# no update, return what is already loaded
self._logger.info('Cached %s is up-to-date. No data download needed' % url)
return self.metadata[url]
else:
headers = retriever.fetch(content_path, progress_bar=self.progress_bars)
if headers is None:
raise Exception('Fetch of %s failed' % url)
if 'etag' not in headers:
# No Etag returned, so generate it
headers['etag'] = fs_util.calc_file_md5(content_path)
# Populate metadata from the headers
self.metadata[url] = headers.copy()
self.metadata[url]['content'] = content_path
return self.metadata[url]
except:
self._logger.error('Failed getting resource: %s')
# forcibly flush local entry if found
if url in self.metadata:
self.metadata.pop(url)
raise
finally:
if url not in self.metadata:
self._logger.debug('Cleaning up on failed load of %s' % url)
# Cleanup on failure
if content_path is not None and os.path.exists(content_path):
os.remove(content_path) | Load from remote, but check local file content to identify duplicate content. If local file is found with
same hash then it is used with metadata from remote object to avoid fetching full content.
:param url:
:return: | Below is the the instruction that describes the task:
### Input:
Load from remote, but check local file content to identify duplicate content. If local file is found with
same hash then it is used with metadata from remote object to avoid fetching full content.
:param url:
:return:
### Response:
def _load(self, url):
"""
Load from remote, but check local file content to identify duplicate content. If local file is found with
same hash then it is used with metadata from remote object to avoid fetching full content.
:param url:
:return:
"""
self._logger.debug('Loading url %s into resource cache' % url)
retriever = get_resource_retriever(url)
content_path = os.path.join(self.path, self._hash_path(url))
try:
if url in self.metadata:
headers = retriever.fetch(content_path, last_etag=self.metadata[url]['etag'], progress_bar=self.progress_bars)
if headers is None:
# no update, return what is already loaded
self._logger.info('Cached %s is up-to-date. No data download needed' % url)
return self.metadata[url]
else:
headers = retriever.fetch(content_path, progress_bar=self.progress_bars)
if headers is None:
raise Exception('Fetch of %s failed' % url)
if 'etag' not in headers:
# No Etag returned, so generate it
headers['etag'] = fs_util.calc_file_md5(content_path)
# Populate metadata from the headers
self.metadata[url] = headers.copy()
self.metadata[url]['content'] = content_path
return self.metadata[url]
except:
self._logger.error('Failed getting resource: %s')
# forcibly flush local entry if found
if url in self.metadata:
self.metadata.pop(url)
raise
finally:
if url not in self.metadata:
self._logger.debug('Cleaning up on failed load of %s' % url)
# Cleanup on failure
if content_path is not None and os.path.exists(content_path):
os.remove(content_path) |
def _sort_by_indep(self, func='get_value', i=None, iunit=None, unit=None,
                   uncover=None, trail=None, linebreak=None,
                   sort_by_indep=None):
    """
    must be called before (or within) _do_linebreak

    Returns the array produced by ``getattr(self, func)``, re-ordered to
    follow the ascending order of the call's independent-variable array.
    Sorting is skipped (the array is returned as-is) when the independent
    array is not an ndarray of matching length.
    """
    if sort_by_indep is None:
        # TODO: add property of the call?
        sort_by_indep = True
    # Fetch both arrays unsorted and without linebreaks so they stay
    # element-for-element aligned.
    indep_array = self.call.i.get_value(i=i,
                                        unit=iunit,
                                        uncover=uncover,
                                        trail=trail,
                                        linebreak=False,
                                        sort_by_indep=False)
    this_array = getattr(self, func)(i=i,
                                     unit=unit,
                                     uncover=uncover,
                                     trail=trail,
                                     linebreak=False,
                                     sort_by_indep=False)
    # Can only sort when the arrays line up element-for-element.
    if not (isinstance(indep_array, np.ndarray) and len(indep_array)==len(this_array)):
        sort_by_indep = False
    if sort_by_indep:
        # TODO: it might be nice to buffer this at the call level, so making
        # multiple get_value calls doesn't have to recompute the sort-order
        sort_inds = indep_array.argsort()
        return this_array[sort_inds]
    else:
        return this_array | must be called before (or within) _do_linebreak
### Input:
must be called before (or within) _do_linebreak
### Response:
def _sort_by_indep(self, func='get_value', i=None, iunit=None, unit=None,
uncover=None, trail=None, linebreak=None,
sort_by_indep=None):
"""
must be called before (or within) _do_linebreak
"""
if sort_by_indep is None:
# TODO: add property of the call?
sort_by_indep = True
indep_array = self.call.i.get_value(i=i,
unit=iunit,
uncover=uncover,
trail=trail,
linebreak=False,
sort_by_indep=False)
this_array = getattr(self, func)(i=i,
unit=unit,
uncover=uncover,
trail=trail,
linebreak=False,
sort_by_indep=False)
if not (isinstance(indep_array, np.ndarray) and len(indep_array)==len(this_array)):
sort_by_indep = False
if sort_by_indep:
# TODO: it might be nice to buffer this at the call level, so making
# multiple get_value calls doesn't have to recompute the sort-order
sort_inds = indep_array.argsort()
return this_array[sort_inds]
else:
return this_array |
def get_bounds(self):
    """
    Get the parameters of bounding box of the UI element.
    Returns:
        :obj:`list` <:obj:`float`>: 4-list (top, right, bottom, left) coordinates related to the edge of screen in
        NormalizedCoordinate system
    """
    size = self.get_size()
    # Position of the element's top-left corner ([x, y]).
    top_left = self.get_position([0, 0])
    # t, r, b, l -- right/bottom are the top-left offset by width/height
    bounds = [top_left[1], top_left[0] + size[0], top_left[1] + size[1], top_left[0]]
    return bounds | Get the parameters of bounding box of the UI element.
Returns:
:obj:`list` <:obj:`float`>: 4-list (top, right, bottom, left) coordinates related to the edge of screen in
NormalizedCoordinate system | Below is the the instruction that describes the task:
### Input:
Get the parameters of bounding box of the UI element.
Returns:
:obj:`list` <:obj:`float`>: 4-list (top, right, bottom, left) coordinates related to the edge of screen in
NormalizedCoordinate system
### Response:
def get_bounds(self):
"""
Get the parameters of bounding box of the UI element.
Returns:
:obj:`list` <:obj:`float`>: 4-list (top, right, bottom, left) coordinates related to the edge of screen in
NormalizedCoordinate system
"""
size = self.get_size()
top_left = self.get_position([0, 0])
# t, r, b, l
bounds = [top_left[1], top_left[0] + size[0], top_left[1] + size[1], top_left[0]]
return bounds |
def host_stanzas(self, config_access):
    """
    Generate the host stanzas for this definition.

    Yields ``(host, lines)`` pairs, one per combination of the config's
    variables, where ``lines`` is the rendered stanza beginning with its
    ``Host`` line.  (Note: this is a generator, not a list.)
    """
    defn_lines = self.resolve_defn(config_access)
    for val_dict in self.variable_iter(config_access.get_variables()):
        # Substitute this combination of variable values; the first
        # rendered line is the host name itself.
        subst = list(self.apply_substitutions(defn_lines, val_dict))
        host = subst[0]
        lines = [ConfigOutput.to_line('Host', [host])] + subst[1:]
        yield host, lines | returns a list of host definitions
### Input:
returns a list of host definitions
### Response:
def host_stanzas(self, config_access):
"""
returns a list of host definitions
"""
defn_lines = self.resolve_defn(config_access)
for val_dict in self.variable_iter(config_access.get_variables()):
subst = list(self.apply_substitutions(defn_lines, val_dict))
host = subst[0]
lines = [ConfigOutput.to_line('Host', [host])] + subst[1:]
yield host, lines |
def send_mail(recipient, sender, subject="", content="", smtp_host="127.0.0.1", smtp_port=25):
"""
Lightweight mail functionality. Sends an mail from *sender* to *recipient* with *subject* and
*content*. *smtp_host* and *smtp_port* are forwarded to the ``smtplib.SMTP`` constructor. *True*
is returned on success, *False* otherwise.
"""
try:
server = smtplib.SMTP(smtp_host, smtp_port)
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning("cannot create SMTP server: {}".format(e))
return False
header = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format(sender, recipient, subject)
server.sendmail(sender, recipient, header + content)
return True | Lightweight mail functionality. Sends an mail from *sender* to *recipient* with *subject* and
*content*. *smtp_host* and *smtp_port* are forwarded to the ``smtplib.SMTP`` constructor. *True*
is returned on success, *False* otherwise. | Below is the the instruction that describes the task:
### Input:
Lightweight mail functionality. Sends an mail from *sender* to *recipient* with *subject* and
*content*. *smtp_host* and *smtp_port* are forwarded to the ``smtplib.SMTP`` constructor. *True*
is returned on success, *False* otherwise.
### Response:
def send_mail(recipient, sender, subject="", content="", smtp_host="127.0.0.1", smtp_port=25):
"""
Lightweight mail functionality. Sends an mail from *sender* to *recipient* with *subject* and
*content*. *smtp_host* and *smtp_port* are forwarded to the ``smtplib.SMTP`` constructor. *True*
is returned on success, *False* otherwise.
"""
try:
server = smtplib.SMTP(smtp_host, smtp_port)
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning("cannot create SMTP server: {}".format(e))
return False
header = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format(sender, recipient, subject)
server.sendmail(sender, recipient, header + content)
return True |
def mod_watch(name, sfun=None, **kwargs):
    '''
    The docker_container watcher, called to invoke the watch command.
    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.
        Parameters for this function should be set by the state being triggered.
    '''
    if sfun == 'running':
        # Deep-copy so the caller's kwargs are not mutated below.
        watch_kwargs = copy.deepcopy(kwargs)
        if watch_kwargs.get('watch_action', 'force') == 'force':
            watch_kwargs['force'] = True
        else:
            # Any non-'force' watch_action sends a signal instead of
            # forcing a restart.
            watch_kwargs['send_signal'] = True
            watch_kwargs['force'] = False
        return running(name, **watch_kwargs)
    if sfun == 'stopped':
        return stopped(name, **salt.utils.args.clean_kwargs(**kwargs))
    if sfun == 'run':
        return run(name, **salt.utils.args.clean_kwargs(**kwargs))
    # Unsupported state function: report a standard failed state result.
    return {'name': name,
            'changes': {},
            'result': False,
            'comment': ('watch requisite is not'
                        ' implemented for {0}'.format(sfun))} | The docker_container watcher, called to invoke the watch command.
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered. | Below is the the instruction that describes the task:
### Input:
The docker_container watcher, called to invoke the watch command.
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
### Response:
def mod_watch(name, sfun=None, **kwargs):
'''
The docker_container watcher, called to invoke the watch command.
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
'''
if sfun == 'running':
watch_kwargs = copy.deepcopy(kwargs)
if watch_kwargs.get('watch_action', 'force') == 'force':
watch_kwargs['force'] = True
else:
watch_kwargs['send_signal'] = True
watch_kwargs['force'] = False
return running(name, **watch_kwargs)
if sfun == 'stopped':
return stopped(name, **salt.utils.args.clean_kwargs(**kwargs))
if sfun == 'run':
return run(name, **salt.utils.args.clean_kwargs(**kwargs))
return {'name': name,
'changes': {},
'result': False,
'comment': ('watch requisite is not'
' implemented for {0}'.format(sfun))} |
def get_tasks(self,
identifier=None,
task_id=None,
task_type=None,
params=None,
config=None,
verbose=None,
request_kwargs=None):
"""Get tasks from the Archive.org catalog. ``internetarchive`` must be configured
with your logged-in-* cookies to use this function. If no arguments are provided,
all queued tasks for the user will be returned.
:type identifier: str
:param identifier: (optional) The Archive.org identifier for which to retrieve
tasks for.
:type task_id: int or str
:param task_id: (optional) The task_id to retrieve from the Archive.org catalog.
:type task_type: str
:param task_type: (optional) The type of tasks to retrieve from the Archive.org
catalog. The types can be either "red" for failed tasks, "blue"
for running tasks, "green" for pending tasks, "brown" for paused
tasks, or "purple" for completed tasks.
:type params: dict
:param params: (optional) The URL parameters to send with each request sent to the
Archive.org catalog API.
:type config: dict
:param secure: (optional) Configuration options for session.
:type verbose: bool
:param verbose: (optional) Set to ``True`` to retrieve verbose information for
each catalog task returned. verbose is set to ``True`` by default.
:returns: A set of :class:`CatalogTask` objects.
"""
request_kwargs = {} if not request_kwargs else request_kwargs
_catalog = Catalog(self,
identifier=identifier,
task_id=task_id,
params=params,
config=config,
verbose=verbose,
request_kwargs=request_kwargs)
if task_type:
return eval('_catalog.{0}_rows'.format(task_type.lower()))
else:
return _catalog.tasks | Get tasks from the Archive.org catalog. ``internetarchive`` must be configured
with your logged-in-* cookies to use this function. If no arguments are provided,
all queued tasks for the user will be returned.
:type identifier: str
:param identifier: (optional) The Archive.org identifier for which to retrieve
tasks for.
:type task_id: int or str
:param task_id: (optional) The task_id to retrieve from the Archive.org catalog.
:type task_type: str
:param task_type: (optional) The type of tasks to retrieve from the Archive.org
catalog. The types can be either "red" for failed tasks, "blue"
for running tasks, "green" for pending tasks, "brown" for paused
tasks, or "purple" for completed tasks.
:type params: dict
:param params: (optional) The URL parameters to send with each request sent to the
Archive.org catalog API.
:type config: dict
:param secure: (optional) Configuration options for session.
:type verbose: bool
:param verbose: (optional) Set to ``True`` to retrieve verbose information for
each catalog task returned. verbose is set to ``True`` by default.
:returns: A set of :class:`CatalogTask` objects. | Below is the the instruction that describes the task:
### Input:
Get tasks from the Archive.org catalog. ``internetarchive`` must be configured
with your logged-in-* cookies to use this function. If no arguments are provided,
all queued tasks for the user will be returned.
:type identifier: str
:param identifier: (optional) The Archive.org identifier for which to retrieve
tasks for.
:type task_id: int or str
:param task_id: (optional) The task_id to retrieve from the Archive.org catalog.
:type task_type: str
:param task_type: (optional) The type of tasks to retrieve from the Archive.org
catalog. The types can be either "red" for failed tasks, "blue"
for running tasks, "green" for pending tasks, "brown" for paused
tasks, or "purple" for completed tasks.
:type params: dict
:param params: (optional) The URL parameters to send with each request sent to the
Archive.org catalog API.
:type config: dict
:param secure: (optional) Configuration options for session.
:type verbose: bool
:param verbose: (optional) Set to ``True`` to retrieve verbose information for
each catalog task returned. verbose is set to ``True`` by default.
:returns: A set of :class:`CatalogTask` objects.
### Response:
def get_tasks(self,
identifier=None,
task_id=None,
task_type=None,
params=None,
config=None,
verbose=None,
request_kwargs=None):
"""Get tasks from the Archive.org catalog. ``internetarchive`` must be configured
with your logged-in-* cookies to use this function. If no arguments are provided,
all queued tasks for the user will be returned.
:type identifier: str
:param identifier: (optional) The Archive.org identifier for which to retrieve
tasks for.
:type task_id: int or str
:param task_id: (optional) The task_id to retrieve from the Archive.org catalog.
:type task_type: str
:param task_type: (optional) The type of tasks to retrieve from the Archive.org
catalog. The types can be either "red" for failed tasks, "blue"
for running tasks, "green" for pending tasks, "brown" for paused
tasks, or "purple" for completed tasks.
:type params: dict
:param params: (optional) The URL parameters to send with each request sent to the
Archive.org catalog API.
:type config: dict
:param secure: (optional) Configuration options for session.
:type verbose: bool
:param verbose: (optional) Set to ``True`` to retrieve verbose information for
each catalog task returned. verbose is set to ``True`` by default.
:returns: A set of :class:`CatalogTask` objects.
"""
request_kwargs = {} if not request_kwargs else request_kwargs
_catalog = Catalog(self,
identifier=identifier,
task_id=task_id,
params=params,
config=config,
verbose=verbose,
request_kwargs=request_kwargs)
if task_type:
return eval('_catalog.{0}_rows'.format(task_type.lower()))
else:
return _catalog.tasks |
def _find_sources(im, target, sources, polarity):
"""Get the subset of source nodes with paths to the target.
Given a target, a list of sources, and a path polarity, perform a
breadth-first search upstream from the target to determine whether any of
the queried sources have paths to the target with the appropriate polarity.
For efficiency, does not return the full path, but identifies the upstream
sources and the length of the path.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
target : str
The node (rule name) in the influence map to start looking upstream for
marching sources.
sources : list of str
The nodes (rules) corresponding to the subject or upstream influence
being checked.
polarity : int
Required polarity of the path between source and target.
Returns
-------
generator of (source, polarity, path_length)
Yields tuples of source node (string), polarity (int) and path length
(int). If there are no paths to any of the given source nodes, the
generator isignempty.
"""
# First, create a list of visited nodes
# Adapted from
# networkx.algorithms.traversal.breadth_first_search.bfs_edges
visited = set([(target, 1)])
# Generate list of predecessor nodes with a sign updated according to the
# sign of the target node
target_tuple = (target, 1)
# The queue holds tuples of "parents" (in this case downstream nodes) and
# their "children" (in this case their upstream influencers)
queue = deque([(target_tuple, _get_signed_predecessors(im, target, 1), 0)])
while queue:
parent, children, path_length = queue[0]
try:
# Get the next child in the list
(child, sign) = next(children)
# Is this child one of the source nodes we're looking for? If so,
# yield it along with path length.
if (sources is None or child in sources) and sign == polarity:
logger.debug("Found path to %s from %s with desired sign %s "
"with length %d" %
(target, child, polarity, path_length+1))
yield (child, sign, path_length+1)
# Check this child against the visited list. If we haven't visited
# it already (accounting for the path to the node), then add it
# to the queue.
if (child, sign) not in visited:
visited.add((child, sign))
queue.append(((child, sign),
_get_signed_predecessors(im, child, sign),
path_length + 1))
# Once we've finished iterating over the children of the current node,
# pop the node off and go to the next one in the queue
except StopIteration:
queue.popleft()
# There was no path; this will produce an empty generator
return | Get the subset of source nodes with paths to the target.
Given a target, a list of sources, and a path polarity, perform a
breadth-first search upstream from the target to determine whether any of
the queried sources have paths to the target with the appropriate polarity.
For efficiency, does not return the full path, but identifies the upstream
sources and the length of the path.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
target : str
The node (rule name) in the influence map to start looking upstream for
marching sources.
sources : list of str
The nodes (rules) corresponding to the subject or upstream influence
being checked.
polarity : int
Required polarity of the path between source and target.
Returns
-------
generator of (source, polarity, path_length)
Yields tuples of source node (string), polarity (int) and path length
(int). If there are no paths to any of the given source nodes, the
generator isignempty. | Below is the the instruction that describes the task:
### Input:
Get the subset of source nodes with paths to the target.
Given a target, a list of sources, and a path polarity, perform a
breadth-first search upstream from the target to determine whether any of
the queried sources have paths to the target with the appropriate polarity.
For efficiency, does not return the full path, but identifies the upstream
sources and the length of the path.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
target : str
The node (rule name) in the influence map to start looking upstream for
marching sources.
sources : list of str
The nodes (rules) corresponding to the subject or upstream influence
being checked.
polarity : int
Required polarity of the path between source and target.
Returns
-------
generator of (source, polarity, path_length)
Yields tuples of source node (string), polarity (int) and path length
(int). If there are no paths to any of the given source nodes, the
generator isignempty.
### Response:
def _find_sources(im, target, sources, polarity):
"""Get the subset of source nodes with paths to the target.
Given a target, a list of sources, and a path polarity, perform a
breadth-first search upstream from the target to determine whether any of
the queried sources have paths to the target with the appropriate polarity.
For efficiency, does not return the full path, but identifies the upstream
sources and the length of the path.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
target : str
The node (rule name) in the influence map to start looking upstream for
marching sources.
sources : list of str
The nodes (rules) corresponding to the subject or upstream influence
being checked.
polarity : int
Required polarity of the path between source and target.
Returns
-------
generator of (source, polarity, path_length)
Yields tuples of source node (string), polarity (int) and path length
(int). If there are no paths to any of the given source nodes, the
generator isignempty.
"""
# First, create a list of visited nodes
# Adapted from
# networkx.algorithms.traversal.breadth_first_search.bfs_edges
visited = set([(target, 1)])
# Generate list of predecessor nodes with a sign updated according to the
# sign of the target node
target_tuple = (target, 1)
# The queue holds tuples of "parents" (in this case downstream nodes) and
# their "children" (in this case their upstream influencers)
queue = deque([(target_tuple, _get_signed_predecessors(im, target, 1), 0)])
while queue:
parent, children, path_length = queue[0]
try:
# Get the next child in the list
(child, sign) = next(children)
# Is this child one of the source nodes we're looking for? If so,
# yield it along with path length.
if (sources is None or child in sources) and sign == polarity:
logger.debug("Found path to %s from %s with desired sign %s "
"with length %d" %
(target, child, polarity, path_length+1))
yield (child, sign, path_length+1)
# Check this child against the visited list. If we haven't visited
# it already (accounting for the path to the node), then add it
# to the queue.
if (child, sign) not in visited:
visited.add((child, sign))
queue.append(((child, sign),
_get_signed_predecessors(im, child, sign),
path_length + 1))
# Once we've finished iterating over the children of the current node,
# pop the node off and go to the next one in the queue
except StopIteration:
queue.popleft()
# There was no path; this will produce an empty generator
return |
def print(self):
"""Print self."""
print(
'{dim}Identifier:{none} {cyan}{identifier}{none}\n'
'{dim}Name:{none} {name}\n'
'{dim}Description:{none}\n{description}'.format(
dim=Style.DIM,
cyan=Fore.CYAN,
none=Style.RESET_ALL,
identifier=self.identifier,
name=self.name,
description=pretty_description(self.description, indent=2)
)
)
if hasattr(self, 'argument_list') and self.argument_list:
print('{dim}Arguments:{none}'.format(
dim=Style.DIM, none=Style.RESET_ALL))
for argument in self.argument_list:
argument.print(indent=2) | Print self. | Below is the the instruction that describes the task:
### Input:
Print self.
### Response:
def print(self):
"""Print self."""
print(
'{dim}Identifier:{none} {cyan}{identifier}{none}\n'
'{dim}Name:{none} {name}\n'
'{dim}Description:{none}\n{description}'.format(
dim=Style.DIM,
cyan=Fore.CYAN,
none=Style.RESET_ALL,
identifier=self.identifier,
name=self.name,
description=pretty_description(self.description, indent=2)
)
)
if hasattr(self, 'argument_list') and self.argument_list:
print('{dim}Arguments:{none}'.format(
dim=Style.DIM, none=Style.RESET_ALL))
for argument in self.argument_list:
argument.print(indent=2) |
def _append_to_data_parts(self, data, start, end):
'''
Intelligently appends data to self.string_parts.
For use by self._format.
'''
try:
while data[start] == ' ':
start += 1
if start == end:
end = len(data[start:])
self.string_parts.append(data[start:end])
except KeyboardInterrupt as e:
raise e
except Exception:
try:
self.string_parts.append(data[start:])
except KeyboardInterrupt as e:
raise e
except Exception:
pass
return start | Intelligently appends data to self.string_parts.
For use by self._format. | Below is the the instruction that describes the task:
### Input:
Intelligently appends data to self.string_parts.
For use by self._format.
### Response:
def _append_to_data_parts(self, data, start, end):
'''
Intelligently appends data to self.string_parts.
For use by self._format.
'''
try:
while data[start] == ' ':
start += 1
if start == end:
end = len(data[start:])
self.string_parts.append(data[start:end])
except KeyboardInterrupt as e:
raise e
except Exception:
try:
self.string_parts.append(data[start:])
except KeyboardInterrupt as e:
raise e
except Exception:
pass
return start |
def dispose_qualification_type(self, qualification_id):
"""Remove a qualification type we created"""
return self._is_ok(
self.mturk.delete_qualification_type(QualificationTypeId=qualification_id)
) | Remove a qualification type we created | Below is the the instruction that describes the task:
### Input:
Remove a qualification type we created
### Response:
def dispose_qualification_type(self, qualification_id):
"""Remove a qualification type we created"""
return self._is_ok(
self.mturk.delete_qualification_type(QualificationTypeId=qualification_id)
) |
def before_app_websocket(self, func: Callable) -> Callable:
"""Add a before request websocket to the App.
This is designed to be used as a decorator, and has the same arguments
as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the
app this blueprint is registered on. An example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.before_app_websocket
def before():
...
"""
self.record_once(lambda state: state.app.before_websocket(func))
return func | Add a before request websocket to the App.
This is designed to be used as a decorator, and has the same arguments
as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the
app this blueprint is registered on. An example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.before_app_websocket
def before():
... | Below is the the instruction that describes the task:
### Input:
Add a before request websocket to the App.
This is designed to be used as a decorator, and has the same arguments
as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the
app this blueprint is registered on. An example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.before_app_websocket
def before():
...
### Response:
def before_app_websocket(self, func: Callable) -> Callable:
"""Add a before request websocket to the App.
This is designed to be used as a decorator, and has the same arguments
as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the
app this blueprint is registered on. An example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.before_app_websocket
def before():
...
"""
self.record_once(lambda state: state.app.before_websocket(func))
return func |
def factory(fileobject, jfs, parentpath): # fileobject from lxml.objectify
'Class method to get the correct file class instatiated'
if hasattr(fileobject, 'currentRevision'): # a normal file
return JFSFile(fileobject, jfs, parentpath)
elif str(fileobject.latestRevision.state) == ProtoFile.STATE_INCOMPLETE:
return JFSIncompleteFile(fileobject, jfs, parentpath)
elif str(fileobject.latestRevision.state) == ProtoFile.STATE_CORRUPT:
return JFSCorruptFile(fileobject, jfs, parentpath)
else:
raise NotImplementedError('No JFS*File support for state %r. Please file a bug!' % fileobject.latestRevision.state) | Class method to get the correct file class instatiated | Below is the the instruction that describes the task:
### Input:
Class method to get the correct file class instatiated
### Response:
def factory(fileobject, jfs, parentpath): # fileobject from lxml.objectify
'Class method to get the correct file class instatiated'
if hasattr(fileobject, 'currentRevision'): # a normal file
return JFSFile(fileobject, jfs, parentpath)
elif str(fileobject.latestRevision.state) == ProtoFile.STATE_INCOMPLETE:
return JFSIncompleteFile(fileobject, jfs, parentpath)
elif str(fileobject.latestRevision.state) == ProtoFile.STATE_CORRUPT:
return JFSCorruptFile(fileobject, jfs, parentpath)
else:
raise NotImplementedError('No JFS*File support for state %r. Please file a bug!' % fileobject.latestRevision.state) |
def _normalize_url(graph: BELGraph, keyword: str) -> Optional[str]: # FIXME move to utilities and unit test
"""Normalize a URL for the BEL graph."""
if keyword == BEL_DEFAULT_NAMESPACE and BEL_DEFAULT_NAMESPACE not in graph.namespace_url:
return BEL_DEFAULT_NAMESPACE_URL
return graph.namespace_url.get(keyword) | Normalize a URL for the BEL graph. | Below is the the instruction that describes the task:
### Input:
Normalize a URL for the BEL graph.
### Response:
def _normalize_url(graph: BELGraph, keyword: str) -> Optional[str]: # FIXME move to utilities and unit test
"""Normalize a URL for the BEL graph."""
if keyword == BEL_DEFAULT_NAMESPACE and BEL_DEFAULT_NAMESPACE not in graph.namespace_url:
return BEL_DEFAULT_NAMESPACE_URL
return graph.namespace_url.get(keyword) |
def from_global_moment_and_saxis(cls, global_moment, saxis):
"""
Convenience method to initialize Magmom from a given global
magnetic moment, i.e. magnetic moment with saxis=(0,0,1), and
provided saxis.
Method is useful if you do not know the components of your
magnetic moment in frame of your desired saxis.
:param global_moment:
:param saxis: desired saxis
:return:
"""
magmom = Magmom(global_moment)
return cls(magmom.get_moment(saxis=saxis), saxis=saxis) | Convenience method to initialize Magmom from a given global
magnetic moment, i.e. magnetic moment with saxis=(0,0,1), and
provided saxis.
Method is useful if you do not know the components of your
magnetic moment in frame of your desired saxis.
:param global_moment:
:param saxis: desired saxis
:return: | Below is the the instruction that describes the task:
### Input:
Convenience method to initialize Magmom from a given global
magnetic moment, i.e. magnetic moment with saxis=(0,0,1), and
provided saxis.
Method is useful if you do not know the components of your
magnetic moment in frame of your desired saxis.
:param global_moment:
:param saxis: desired saxis
:return:
### Response:
def from_global_moment_and_saxis(cls, global_moment, saxis):
"""
Convenience method to initialize Magmom from a given global
magnetic moment, i.e. magnetic moment with saxis=(0,0,1), and
provided saxis.
Method is useful if you do not know the components of your
magnetic moment in frame of your desired saxis.
:param global_moment:
:param saxis: desired saxis
:return:
"""
magmom = Magmom(global_moment)
return cls(magmom.get_moment(saxis=saxis), saxis=saxis) |
def to_date(self):
"""
construct datetime.date instance represented calendar date of BusinessDate instance
:return datetime.date:
"""
y, m, d = self.to_ymd()
return date(y, m, d) | construct datetime.date instance represented calendar date of BusinessDate instance
:return datetime.date: | Below is the the instruction that describes the task:
### Input:
construct datetime.date instance represented calendar date of BusinessDate instance
:return datetime.date:
### Response:
def to_date(self):
"""
construct datetime.date instance represented calendar date of BusinessDate instance
:return datetime.date:
"""
y, m, d = self.to_ymd()
return date(y, m, d) |
def handle(self, handler):
""" register a handler (add a callback function) """
with self._hlock:
self._handler_list.append(handler)
return self | register a handler (add a callback function) | Below is the the instruction that describes the task:
### Input:
register a handler (add a callback function)
### Response:
def handle(self, handler):
""" register a handler (add a callback function) """
with self._hlock:
self._handler_list.append(handler)
return self |
def get_float_value(self, section, option, default=0.0):
"""Get the float value of an option, if it exists."""
try:
return self.parser.getfloat(section, option)
except NoOptionError:
return float(default) | Get the float value of an option, if it exists. | Below is the the instruction that describes the task:
### Input:
Get the float value of an option, if it exists.
### Response:
def get_float_value(self, section, option, default=0.0):
"""Get the float value of an option, if it exists."""
try:
return self.parser.getfloat(section, option)
except NoOptionError:
return float(default) |
def db_en010(self, value=None):
""" Corresponds to IDD Field `db_en010`
mean coincident dry-bulb temperature to
Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_en010`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db_en010`'.format(value))
self._db_en010 = value | Corresponds to IDD Field `db_en010`
mean coincident dry-bulb temperature to
Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_en010`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | Below is the the instruction that describes the task:
### Input:
Corresponds to IDD Field `db_en010`
mean coincident dry-bulb temperature to
Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_en010`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def db_en010(self, value=None):
""" Corresponds to IDD Field `db_en010`
mean coincident dry-bulb temperature to
Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_en010`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db_en010`'.format(value))
self._db_en010 = value |
def do_gdbserver(self):
"""! @brief Handle 'gdbserver' subcommand."""
self._process_commands(self._args.commands)
gdbs = []
try:
# Build dict of session options.
sessionOptions = convert_session_options(self._args.options)
sessionOptions.update({
'gdbserver_port' : self._args.port_number,
'telnet_port' : self._args.telnet_port,
'persist' : self._args.persist,
'step_into_interrupt' : self._args.step_into_interrupt,
'chip_erase': ERASE_OPTIONS[self._args.erase],
'fast_program' : self._args.trust_crc,
'enable_semihosting' : self._args.enable_semihosting,
'serve_local_only' : self._args.serve_local_only,
'vector_catch' : self._args.vector_catch,
})
session = ConnectHelper.session_with_chosen_probe(
blocking=(not self._args.no_wait),
project_dir=self._args.project_dir,
user_script=self._args.script,
config_file=self._args.config,
no_config=self._args.no_config,
pack=self._args.pack,
unique_id=self._args.unique_id,
target_override=self._args.target_override,
frequency=self._args.frequency,
**sessionOptions)
if session is None:
LOG.error("No probe selected.")
return
with session:
# Set ELF if provided.
if self._args.elf:
session.board.target.elf = self._args.elf
for core_number, core in session.board.target.cores.items():
gdb = GDBServer(session,
core=core_number,
server_listening_callback=self.server_listening)
gdbs.append(gdb)
gdb = gdbs[0]
while gdb.isAlive():
gdb.join(timeout=0.5)
except (KeyboardInterrupt, Exception):
for gdb in gdbs:
gdb.stop()
raise | ! @brief Handle 'gdbserver' subcommand. | Below is the the instruction that describes the task:
### Input:
! @brief Handle 'gdbserver' subcommand.
### Response:
def do_gdbserver(self):
"""! @brief Handle 'gdbserver' subcommand."""
self._process_commands(self._args.commands)
gdbs = []
try:
# Build dict of session options.
sessionOptions = convert_session_options(self._args.options)
sessionOptions.update({
'gdbserver_port' : self._args.port_number,
'telnet_port' : self._args.telnet_port,
'persist' : self._args.persist,
'step_into_interrupt' : self._args.step_into_interrupt,
'chip_erase': ERASE_OPTIONS[self._args.erase],
'fast_program' : self._args.trust_crc,
'enable_semihosting' : self._args.enable_semihosting,
'serve_local_only' : self._args.serve_local_only,
'vector_catch' : self._args.vector_catch,
})
session = ConnectHelper.session_with_chosen_probe(
blocking=(not self._args.no_wait),
project_dir=self._args.project_dir,
user_script=self._args.script,
config_file=self._args.config,
no_config=self._args.no_config,
pack=self._args.pack,
unique_id=self._args.unique_id,
target_override=self._args.target_override,
frequency=self._args.frequency,
**sessionOptions)
if session is None:
LOG.error("No probe selected.")
return
with session:
# Set ELF if provided.
if self._args.elf:
session.board.target.elf = self._args.elf
for core_number, core in session.board.target.cores.items():
gdb = GDBServer(session,
core=core_number,
server_listening_callback=self.server_listening)
gdbs.append(gdb)
gdb = gdbs[0]
while gdb.isAlive():
gdb.join(timeout=0.5)
except (KeyboardInterrupt, Exception):
for gdb in gdbs:
gdb.stop()
raise |
def make(parser):
"""provison Manila Share with HA"""
s = parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
def install_f(args):
install(args)
install_parser = install_subparser(s)
install_parser.set_defaults(func=install_f) | provison Manila Share with HA | Below is the the instruction that describes the task:
### Input:
provison Manila Share with HA
### Response:
def make(parser):
"""provison Manila Share with HA"""
s = parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
def install_f(args):
install(args)
install_parser = install_subparser(s)
install_parser.set_defaults(func=install_f) |
def add_var_condor_cmd(self, command):
"""
Add a condor command to the submit file that allows variable (macro)
arguments to be passes to the executable.
"""
if command not in self.__var_cmds:
self.__var_cmds.append(command)
macro = self.__bad_macro_chars.sub( r'', command )
self.add_condor_cmd(command, '$(macro' + macro + ')') | Add a condor command to the submit file that allows variable (macro)
arguments to be passes to the executable. | Below is the the instruction that describes the task:
### Input:
Add a condor command to the submit file that allows variable (macro)
arguments to be passes to the executable.
### Response:
def add_var_condor_cmd(self, command):
"""
Add a condor command to the submit file that allows variable (macro)
arguments to be passes to the executable.
"""
if command not in self.__var_cmds:
self.__var_cmds.append(command)
macro = self.__bad_macro_chars.sub( r'', command )
self.add_condor_cmd(command, '$(macro' + macro + ')') |
def parse(self, name, description):
"""
Parse option name.
:param name: option's name
:param description: option's description
Parsing acceptable names:
* -f: shortname
* --force: longname
* -f, --force: shortname and longname
* -o <output>: shortname and a required value
* -o, --output [output]: shortname, longname and optional value
Parsing default value from description:
* source directory, default: src
* source directory, default: [src]
* source directory, default: <src>
"""
name = name.strip()
if '<' in name:
self.required = True
self.boolean = False
name = name[:name.index('<')].strip()
elif '[' in name:
self.required = False
self.boolean = False
name = name[:name.index('[')].strip()
else:
self.required = False
self.boolean = True
regex = re.compile(r'(-\w)?(?:\,\s*)?(--[\w\-]+)?')
m = regex.findall(name)
if not m:
raise ValueError('Invalid Option: %s', name)
shortname, longname = m[0]
if not shortname and not longname:
raise ValueError('Invalid Option: %s', name)
self.shortname = shortname
self.longname = longname
# parse store key
if longname and longname.startswith('--no-'):
self.key = longname[5:]
elif longname:
self.key = longname[2:]
else:
self.key = shortname
if self.boolean:
# boolean don't need to parse from description
if longname and longname.startswith('--no-'):
self.default = True
else:
self.default = False
return self
if not description:
self.default = None
return self
# parse default value from description
regex = re.compile(r'\sdefault:(.*)$')
m = regex.findall(description)
if not m:
self.default = None
return self
# if it has a default value, it is not required
self.required = False
value = m[0].strip()
if value.startswith('<') and value.endswith('>'):
value = value[1:-1]
elif value.startswith('[') and value.endswith(']'):
value = value[1:-1]
self.default = value.strip()
return self | Parse option name.
:param name: option's name
:param description: option's description
Parsing acceptable names:
* -f: shortname
* --force: longname
* -f, --force: shortname and longname
* -o <output>: shortname and a required value
* -o, --output [output]: shortname, longname and optional value
Parsing default value from description:
* source directory, default: src
* source directory, default: [src]
* source directory, default: <src> | Below is the the instruction that describes the task:
### Input:
Parse option name.
:param name: option's name
:param description: option's description
Parsing acceptable names:
* -f: shortname
* --force: longname
* -f, --force: shortname and longname
* -o <output>: shortname and a required value
* -o, --output [output]: shortname, longname and optional value
Parsing default value from description:
* source directory, default: src
* source directory, default: [src]
* source directory, default: <src>
### Response:
def parse(self, name, description):
"""
Parse option name.
:param name: option's name
:param description: option's description
Parsing acceptable names:
* -f: shortname
* --force: longname
* -f, --force: shortname and longname
* -o <output>: shortname and a required value
* -o, --output [output]: shortname, longname and optional value
Parsing default value from description:
* source directory, default: src
* source directory, default: [src]
* source directory, default: <src>
"""
name = name.strip()
if '<' in name:
self.required = True
self.boolean = False
name = name[:name.index('<')].strip()
elif '[' in name:
self.required = False
self.boolean = False
name = name[:name.index('[')].strip()
else:
self.required = False
self.boolean = True
regex = re.compile(r'(-\w)?(?:\,\s*)?(--[\w\-]+)?')
m = regex.findall(name)
if not m:
raise ValueError('Invalid Option: %s', name)
shortname, longname = m[0]
if not shortname and not longname:
raise ValueError('Invalid Option: %s', name)
self.shortname = shortname
self.longname = longname
# parse store key
if longname and longname.startswith('--no-'):
self.key = longname[5:]
elif longname:
self.key = longname[2:]
else:
self.key = shortname
if self.boolean:
# boolean don't need to parse from description
if longname and longname.startswith('--no-'):
self.default = True
else:
self.default = False
return self
if not description:
self.default = None
return self
# parse default value from description
regex = re.compile(r'\sdefault:(.*)$')
m = regex.findall(description)
if not m:
self.default = None
return self
# if it has a default value, it is not required
self.required = False
value = m[0].strip()
if value.startswith('<') and value.endswith('>'):
value = value[1:-1]
elif value.startswith('[') and value.endswith(']'):
value = value[1:-1]
self.default = value.strip()
return self |
def validate_number_attribute(self,
attribute: str,
value_type: Union[Type[int], Type[float]] = int,
minimum: Optional[Union[int, float]] = None,
maximum: Optional[Union[int, float]] = None) -> None:
""" Validates that the attribute contains a numeric value within boundaries if specified """
self.add_errors(
validate_number_attribute(self.fully_qualified_name, self._spec, attribute, value_type,
minimum, maximum)) | Validates that the attribute contains a numeric value within boundaries if specified | Below is the the instruction that describes the task:
### Input:
Validates that the attribute contains a numeric value within boundaries if specified
### Response:
def validate_number_attribute(self,
attribute: str,
value_type: Union[Type[int], Type[float]] = int,
minimum: Optional[Union[int, float]] = None,
maximum: Optional[Union[int, float]] = None) -> None:
""" Validates that the attribute contains a numeric value within boundaries if specified """
self.add_errors(
validate_number_attribute(self.fully_qualified_name, self._spec, attribute, value_type,
minimum, maximum)) |
def end(self, *args):
"""
End a nested log.
"""
if self._is_verbose:
# verbose log has no end method
return self
if not args:
self._indent -= 1
return self
self.writeln('end', *args)
self._indent -= 1
return self | End a nested log. | Below is the the instruction that describes the task:
### Input:
End a nested log.
### Response:
def end(self, *args):
"""
End a nested log.
"""
if self._is_verbose:
# verbose log has no end method
return self
if not args:
self._indent -= 1
return self
self.writeln('end', *args)
self._indent -= 1
return self |
def create_new_code(cls,**kwargs):
'''
Creates a new Voucher with a unique voucherId
'''
prefix = kwargs.pop('prefix','')
new = False
while not new:
# Standard is a ten-letter random string of uppercase letters
random_string = ''.join(random.choice(string.ascii_uppercase) for z in range(10))
if not Voucher.objects.filter(voucherId='%s%s' % (prefix, random_string)).exists():
new = True
return Voucher.objects.create(voucherId='%s%s' % (prefix, random_string),**kwargs) | Creates a new Voucher with a unique voucherId | Below is the the instruction that describes the task:
### Input:
Creates a new Voucher with a unique voucherId
### Response:
def create_new_code(cls,**kwargs):
'''
Creates a new Voucher with a unique voucherId
'''
prefix = kwargs.pop('prefix','')
new = False
while not new:
# Standard is a ten-letter random string of uppercase letters
random_string = ''.join(random.choice(string.ascii_uppercase) for z in range(10))
if not Voucher.objects.filter(voucherId='%s%s' % (prefix, random_string)).exists():
new = True
return Voucher.objects.create(voucherId='%s%s' % (prefix, random_string),**kwargs) |
def sync(self, folders):
"""Syncs a list of folders to their assicated buckets.
folders: A list of 2-tuples in the form (folder, bucket)
"""
if not folders:
raise ValueError("No folders to sync given")
for folder in folders:
self.sync_folder(*folder) | Syncs a list of folders to their assicated buckets.
folders: A list of 2-tuples in the form (folder, bucket) | Below is the the instruction that describes the task:
### Input:
Syncs a list of folders to their assicated buckets.
folders: A list of 2-tuples in the form (folder, bucket)
### Response:
def sync(self, folders):
"""Syncs a list of folders to their assicated buckets.
folders: A list of 2-tuples in the form (folder, bucket)
"""
if not folders:
raise ValueError("No folders to sync given")
for folder in folders:
self.sync_folder(*folder) |
def configure(self, kubernetes_host, kubernetes_ca_cert='', token_reviewer_jwt='', pem_keys=None,
mount_point=DEFAULT_MOUNT_POINT):
"""Configure the connection parameters for Kubernetes.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param kubernetes_host: Host must be a host string, a host:port pair, or a URL to the base of the
Kubernetes API server. Example: https://k8s.example.com:443
:type kubernetes_host: str | unicode
:param kubernetes_ca_cert: PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API.
NOTE: Every line must end with a newline: \n
:type kubernetes_ca_cert: str | unicode
:param token_reviewer_jwt: A service account JWT used to access the TokenReview API to validate other
JWTs during login. If not set the JWT used for login will be used to access the API.
:type token_reviewer_jwt: str | unicode
:param pem_keys: Optional list of PEM-formatted public keys or certificates used to verify the signatures of
Kubernetes service account JWTs. If a certificate is given, its public key will be extracted. Not every
installation of Kubernetes exposes these keys.
:type pem_keys: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response
"""
if pem_keys is None:
pem_keys = []
list_of_pem_params = {
'kubernetes_ca_cert': kubernetes_ca_cert,
'pem_keys': pem_keys
}
for param_name, param_argument in list_of_pem_params.items():
validate_pem_format(
param_name=param_name,
param_argument=param_argument,
)
params = {
'kubernetes_host': kubernetes_host,
'kubernetes_ca_cert': kubernetes_ca_cert,
'token_reviewer_jwt': token_reviewer_jwt,
'pem_keys': pem_keys,
}
api_path = '/v1/auth/{mount_point}/config'.format(
mount_point=mount_point
)
return self._adapter.post(
url=api_path,
json=params,
) | Configure the connection parameters for Kubernetes.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param kubernetes_host: Host must be a host string, a host:port pair, or a URL to the base of the
Kubernetes API server. Example: https://k8s.example.com:443
:type kubernetes_host: str | unicode
:param kubernetes_ca_cert: PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API.
NOTE: Every line must end with a newline: \n
:type kubernetes_ca_cert: str | unicode
:param token_reviewer_jwt: A service account JWT used to access the TokenReview API to validate other
JWTs during login. If not set the JWT used for login will be used to access the API.
:type token_reviewer_jwt: str | unicode
:param pem_keys: Optional list of PEM-formatted public keys or certificates used to verify the signatures of
Kubernetes service account JWTs. If a certificate is given, its public key will be extracted. Not every
installation of Kubernetes exposes these keys.
:type pem_keys: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Configure the connection parameters for Kubernetes.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param kubernetes_host: Host must be a host string, a host:port pair, or a URL to the base of the
Kubernetes API server. Example: https://k8s.example.com:443
:type kubernetes_host: str | unicode
:param kubernetes_ca_cert: PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API.
NOTE: Every line must end with a newline: \n
:type kubernetes_ca_cert: str | unicode
:param token_reviewer_jwt: A service account JWT used to access the TokenReview API to validate other
JWTs during login. If not set the JWT used for login will be used to access the API.
:type token_reviewer_jwt: str | unicode
:param pem_keys: Optional list of PEM-formatted public keys or certificates used to verify the signatures of
Kubernetes service account JWTs. If a certificate is given, its public key will be extracted. Not every
installation of Kubernetes exposes these keys.
:type pem_keys: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response
### Response:
def configure(self, kubernetes_host, kubernetes_ca_cert='', token_reviewer_jwt='', pem_keys=None,
mount_point=DEFAULT_MOUNT_POINT):
"""Configure the connection parameters for Kubernetes.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param kubernetes_host: Host must be a host string, a host:port pair, or a URL to the base of the
Kubernetes API server. Example: https://k8s.example.com:443
:type kubernetes_host: str | unicode
:param kubernetes_ca_cert: PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API.
NOTE: Every line must end with a newline: \n
:type kubernetes_ca_cert: str | unicode
:param token_reviewer_jwt: A service account JWT used to access the TokenReview API to validate other
JWTs during login. If not set the JWT used for login will be used to access the API.
:type token_reviewer_jwt: str | unicode
:param pem_keys: Optional list of PEM-formatted public keys or certificates used to verify the signatures of
Kubernetes service account JWTs. If a certificate is given, its public key will be extracted. Not every
installation of Kubernetes exposes these keys.
:type pem_keys: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response
"""
if pem_keys is None:
pem_keys = []
list_of_pem_params = {
'kubernetes_ca_cert': kubernetes_ca_cert,
'pem_keys': pem_keys
}
for param_name, param_argument in list_of_pem_params.items():
validate_pem_format(
param_name=param_name,
param_argument=param_argument,
)
params = {
'kubernetes_host': kubernetes_host,
'kubernetes_ca_cert': kubernetes_ca_cert,
'token_reviewer_jwt': token_reviewer_jwt,
'pem_keys': pem_keys,
}
api_path = '/v1/auth/{mount_point}/config'.format(
mount_point=mount_point
)
return self._adapter.post(
url=api_path,
json=params,
) |
def _compare_expected(expected, output, sess, onnx, decimal=5, onnx_shape=None, **kwargs):
"""
Compares the expected output against the runtime outputs.
This is specific to *onnxruntime* due to variable *sess*
of type *onnxruntime.InferenceSession*.
"""
tested = 0
if isinstance(expected, list):
if isinstance(output, list):
onnx_shapes = [_.shape for _ in sess.get_outputs()]
if 'Out0' in kwargs:
expected = expected[:1]
output = output[:1]
del kwargs['Out0']
if 'Reshape' in kwargs:
del kwargs['Reshape']
output = numpy.hstack(output).ravel()
output = output.reshape((len(expected),
len(output.ravel()) // len(expected)))
if len(expected) != len(output):
raise OnnxRuntimeAssertionError("Unexpected number of outputs '{0}', expected={1}, got={2}".format(onnx, len(expected), len(output)))
for exp, out, osh in zip(expected, output, onnx_shapes):
_compare_expected(exp, out, sess, onnx, decimal=decimal, onnx_shape=osh, **kwargs)
tested += 1
else:
raise OnnxRuntimeAssertionError("Type mismatch for '{0}', output type is {1}".format(onnx, type(output)))
elif isinstance(expected, dict):
if not isinstance(output, dict):
raise OnnxRuntimeAssertionError("Type mismatch for '{0}'".format(onnx))
for k, v in output.items():
if k not in expected:
continue
msg = compare_outputs(expected[k], v, decimal=decimal, **kwargs)
if msg:
raise OnnxRuntimeAssertionError("Unexpected output '{0}' in model '{1}'\n{2}".format(k, onnx, msg))
tested += 1
elif isinstance(expected, numpy.ndarray):
if isinstance(output, list):
if expected.shape[0] == len(output) and isinstance(output[0], dict):
import pandas
output = pandas.DataFrame(output)
output = output[list(sorted(output.columns))]
output = output.values
if isinstance(output, (dict, list)):
if len(output) != 1:
ex = str(output)
if len(ex) > 70:
ex = ex[:70] + "..."
raise OnnxRuntimeAssertionError("More than one output when 1 is expected for onnx '{0}'\n{1}".format(onnx, ex))
output = output[-1]
if not isinstance(output, numpy.ndarray):
raise OnnxRuntimeAssertionError("output must be an array for onnx '{0}' not {1}".format(onnx, type(output)))
if onnx_shape is not None:
if len(onnx_shape) == 2:
cols = onnx_shape[1]
ecols = output.shape[1] if len(output.shape) == 2 else 1
if cols != ecols:
raise OnnxRuntimeAssertionError("Unexpected onnx shape {0} != {1} for onnx '{2}'".format(
onnx_shape, output.shape, onnx))
msg = compare_outputs(expected, output, decimal=decimal, **kwargs)
if isinstance(msg, ExpectedAssertionError):
raise msg
if msg:
raise OnnxRuntimeAssertionError("Unexpected output in model '{0}'\n{1}".format(onnx, msg))
tested += 1
else:
from scipy.sparse.csr import csr_matrix
if isinstance(expected, csr_matrix):
# DictVectorizer
one_array = numpy.array(output)
msg = compare_outputs(expected.todense(), one_array, decimal=decimal, **kwargs)
if msg:
raise OnnxRuntimeAssertionError("Unexpected output in model '{0}'\n{1}".format(onnx, msg))
tested += 1
else:
raise OnnxRuntimeAssertionError("Unexpected type for expected output ({1}) and onnx '{0}'".format(onnx, type(expected)))
if tested ==0:
raise OnnxRuntimeAssertionError("No test for onnx '{0}'".format(onnx)) | Compares the expected output against the runtime outputs.
This is specific to *onnxruntime* due to variable *sess*
of type *onnxruntime.InferenceSession*. | Below is the the instruction that describes the task:
### Input:
Compares the expected output against the runtime outputs.
This is specific to *onnxruntime* due to variable *sess*
of type *onnxruntime.InferenceSession*.
### Response:
def _compare_expected(expected, output, sess, onnx, decimal=5, onnx_shape=None, **kwargs):
"""
Compares the expected output against the runtime outputs.
This is specific to *onnxruntime* due to variable *sess*
of type *onnxruntime.InferenceSession*.
"""
tested = 0
if isinstance(expected, list):
if isinstance(output, list):
onnx_shapes = [_.shape for _ in sess.get_outputs()]
if 'Out0' in kwargs:
expected = expected[:1]
output = output[:1]
del kwargs['Out0']
if 'Reshape' in kwargs:
del kwargs['Reshape']
output = numpy.hstack(output).ravel()
output = output.reshape((len(expected),
len(output.ravel()) // len(expected)))
if len(expected) != len(output):
raise OnnxRuntimeAssertionError("Unexpected number of outputs '{0}', expected={1}, got={2}".format(onnx, len(expected), len(output)))
for exp, out, osh in zip(expected, output, onnx_shapes):
_compare_expected(exp, out, sess, onnx, decimal=decimal, onnx_shape=osh, **kwargs)
tested += 1
else:
raise OnnxRuntimeAssertionError("Type mismatch for '{0}', output type is {1}".format(onnx, type(output)))
elif isinstance(expected, dict):
if not isinstance(output, dict):
raise OnnxRuntimeAssertionError("Type mismatch for '{0}'".format(onnx))
for k, v in output.items():
if k not in expected:
continue
msg = compare_outputs(expected[k], v, decimal=decimal, **kwargs)
if msg:
raise OnnxRuntimeAssertionError("Unexpected output '{0}' in model '{1}'\n{2}".format(k, onnx, msg))
tested += 1
elif isinstance(expected, numpy.ndarray):
if isinstance(output, list):
if expected.shape[0] == len(output) and isinstance(output[0], dict):
import pandas
output = pandas.DataFrame(output)
output = output[list(sorted(output.columns))]
output = output.values
if isinstance(output, (dict, list)):
if len(output) != 1:
ex = str(output)
if len(ex) > 70:
ex = ex[:70] + "..."
raise OnnxRuntimeAssertionError("More than one output when 1 is expected for onnx '{0}'\n{1}".format(onnx, ex))
output = output[-1]
if not isinstance(output, numpy.ndarray):
raise OnnxRuntimeAssertionError("output must be an array for onnx '{0}' not {1}".format(onnx, type(output)))
if onnx_shape is not None:
if len(onnx_shape) == 2:
cols = onnx_shape[1]
ecols = output.shape[1] if len(output.shape) == 2 else 1
if cols != ecols:
raise OnnxRuntimeAssertionError("Unexpected onnx shape {0} != {1} for onnx '{2}'".format(
onnx_shape, output.shape, onnx))
msg = compare_outputs(expected, output, decimal=decimal, **kwargs)
if isinstance(msg, ExpectedAssertionError):
raise msg
if msg:
raise OnnxRuntimeAssertionError("Unexpected output in model '{0}'\n{1}".format(onnx, msg))
tested += 1
else:
from scipy.sparse.csr import csr_matrix
if isinstance(expected, csr_matrix):
# DictVectorizer
one_array = numpy.array(output)
msg = compare_outputs(expected.todense(), one_array, decimal=decimal, **kwargs)
if msg:
raise OnnxRuntimeAssertionError("Unexpected output in model '{0}'\n{1}".format(onnx, msg))
tested += 1
else:
raise OnnxRuntimeAssertionError("Unexpected type for expected output ({1}) and onnx '{0}'".format(onnx, type(expected)))
if tested ==0:
raise OnnxRuntimeAssertionError("No test for onnx '{0}'".format(onnx)) |
def _imm_delattr(self, name):
'''
A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise
raises an exception.
'''
if _imm_is_persist(self):
values = _imm_value_data(self)
if name in values:
dd = object.__getattribute__(self, '__dict__')
if name in dd:
del dd[name]
if name in _imm_const_data(self): _imm_check(imm, [name])
else:
raise TypeError('Attempt to reset parameter \'%s\' of non-transient immutable' % name)
else:
return _imm_trans_delattr(self, name) | A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise
raises an exception. | Below is the the instruction that describes the task:
### Input:
A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise
raises an exception.
### Response:
def _imm_delattr(self, name):
'''
A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise
raises an exception.
'''
if _imm_is_persist(self):
values = _imm_value_data(self)
if name in values:
dd = object.__getattribute__(self, '__dict__')
if name in dd:
del dd[name]
if name in _imm_const_data(self): _imm_check(imm, [name])
else:
raise TypeError('Attempt to reset parameter \'%s\' of non-transient immutable' % name)
else:
return _imm_trans_delattr(self, name) |
def get_scores(self, corpus):
'''
Parameters
----------
corpus
Returns
-------
float, pd.Series
float: point on x-axis at even characteristicness
pd.Series: term -> value between 0 and 1, sorted by score in a descending manner
Background scores from corpus
'''
term_ranks = self.term_ranker(corpus).get_ranks()
freq_df = pd.DataFrame({
'corpus': term_ranks.sum(axis=1),
'standard': self.background_frequencies.get_background_frequency_df()['background']}
).dropna()
corpus_rank = rankdata(freq_df.corpus, 'dense')
standard_rank = rankdata(freq_df.standard, 'dense')
scores = corpus_rank/corpus_rank.max() - standard_rank/standard_rank.max()
#scores = RankDifference().get_scores(bg['corpus'], bg['bg']).sort_values()
# import pdb; pdb.set_trace()
if self.rerank_ranks:
rank_scores, zero_marker = self._rerank_scores(scores)
freq_df['score'] = pd.Series(rank_scores, index=freq_df.index)
else:
if scores.min() < 0 and scores.max() > 0:
zero_marker = -scores.min() / (scores.max() - scores.min())
elif scores.min() > 0:
zero_marker = 0
else:
zero_marker = 1
freq_df['score'] = scale(scores)
return zero_marker, freq_df.sort_values(by='score', ascending=False)['score'] | Parameters
----------
corpus
Returns
-------
float, pd.Series
float: point on x-axis at even characteristicness
pd.Series: term -> value between 0 and 1, sorted by score in a descending manner
Background scores from corpus | Below is the the instruction that describes the task:
### Input:
Parameters
----------
corpus
Returns
-------
float, pd.Series
float: point on x-axis at even characteristicness
pd.Series: term -> value between 0 and 1, sorted by score in a descending manner
Background scores from corpus
### Response:
def get_scores(self, corpus):
'''
Parameters
----------
corpus
Returns
-------
float, pd.Series
float: point on x-axis at even characteristicness
pd.Series: term -> value between 0 and 1, sorted by score in a descending manner
Background scores from corpus
'''
term_ranks = self.term_ranker(corpus).get_ranks()
freq_df = pd.DataFrame({
'corpus': term_ranks.sum(axis=1),
'standard': self.background_frequencies.get_background_frequency_df()['background']}
).dropna()
corpus_rank = rankdata(freq_df.corpus, 'dense')
standard_rank = rankdata(freq_df.standard, 'dense')
scores = corpus_rank/corpus_rank.max() - standard_rank/standard_rank.max()
#scores = RankDifference().get_scores(bg['corpus'], bg['bg']).sort_values()
# import pdb; pdb.set_trace()
if self.rerank_ranks:
rank_scores, zero_marker = self._rerank_scores(scores)
freq_df['score'] = pd.Series(rank_scores, index=freq_df.index)
else:
if scores.min() < 0 and scores.max() > 0:
zero_marker = -scores.min() / (scores.max() - scores.min())
elif scores.min() > 0:
zero_marker = 0
else:
zero_marker = 1
freq_df['score'] = scale(scores)
return zero_marker, freq_df.sort_values(by='score', ascending=False)['score'] |
def parse_parameter(self, node):
"""
Parses <Parameter>
@param node: Node containing the <Parameter> element
@type node: xml.etree.Element
@raise ParseError: Raised when the parameter does not have a name.
@raise ParseError: Raised when the parameter does not have a
dimension.
"""
if self.current_component_type == None:
self.raise_error('Parameters can only be defined in ' +
'a component type')
try:
name = node.lattrib['name']
except:
self.raise_error('<Parameter> must specify a name')
try:
dimension = node.lattrib['dimension']
except:
self.raise_error("Parameter '{0}' has no dimension",
name)
parameter = Parameter(name, dimension)
self.current_component_type.add_parameter(parameter) | Parses <Parameter>
@param node: Node containing the <Parameter> element
@type node: xml.etree.Element
@raise ParseError: Raised when the parameter does not have a name.
@raise ParseError: Raised when the parameter does not have a
dimension. | Below is the the instruction that describes the task:
### Input:
Parses <Parameter>
@param node: Node containing the <Parameter> element
@type node: xml.etree.Element
@raise ParseError: Raised when the parameter does not have a name.
@raise ParseError: Raised when the parameter does not have a
dimension.
### Response:
def parse_parameter(self, node):
"""
Parses <Parameter>
@param node: Node containing the <Parameter> element
@type node: xml.etree.Element
@raise ParseError: Raised when the parameter does not have a name.
@raise ParseError: Raised when the parameter does not have a
dimension.
"""
if self.current_component_type == None:
self.raise_error('Parameters can only be defined in ' +
'a component type')
try:
name = node.lattrib['name']
except:
self.raise_error('<Parameter> must specify a name')
try:
dimension = node.lattrib['dimension']
except:
self.raise_error("Parameter '{0}' has no dimension",
name)
parameter = Parameter(name, dimension)
self.current_component_type.add_parameter(parameter) |
def wait_until_done(self):
"""
This method will not return until the job is either complete or has
reached an error state. This queries the server periodically to check
for an update in status.
"""
wait = 1
while True:
time.sleep(wait)
self.get_info()
if self.info['status']['isFinished']:
break
# implements a crude exponential back off
wait = min(wait * 2, 60) | This method will not return until the job is either complete or has
reached an error state. This queries the server periodically to check
for an update in status. | Below is the the instruction that describes the task:
### Input:
This method will not return until the job is either complete or has
reached an error state. This queries the server periodically to check
for an update in status.
### Response:
def wait_until_done(self):
"""
This method will not return until the job is either complete or has
reached an error state. This queries the server periodically to check
for an update in status.
"""
wait = 1
while True:
time.sleep(wait)
self.get_info()
if self.info['status']['isFinished']:
break
# implements a crude exponential back off
wait = min(wait * 2, 60) |
def update(self, loadbalancer, name=None, algorithm=None, protocol=None,
halfClosed=None, port=None, timeout=None, httpsRedirect=None):
"""
Provides a way to modify the following attributes of a load balancer:
- name
- algorithm
- protocol
- halfClosed
- port
- timeout
- httpsRedirect
"""
return self._manager.update(loadbalancer, name=name,
algorithm=algorithm, protocol=protocol, halfClosed=halfClosed,
port=port, timeout=timeout, httpsRedirect=httpsRedirect) | Provides a way to modify the following attributes of a load balancer:
- name
- algorithm
- protocol
- halfClosed
- port
- timeout
- httpsRedirect | Below is the the instruction that describes the task:
### Input:
Provides a way to modify the following attributes of a load balancer:
- name
- algorithm
- protocol
- halfClosed
- port
- timeout
- httpsRedirect
### Response:
def update(self, loadbalancer, name=None, algorithm=None, protocol=None,
halfClosed=None, port=None, timeout=None, httpsRedirect=None):
"""
Provides a way to modify the following attributes of a load balancer:
- name
- algorithm
- protocol
- halfClosed
- port
- timeout
- httpsRedirect
"""
return self._manager.update(loadbalancer, name=name,
algorithm=algorithm, protocol=protocol, halfClosed=halfClosed,
port=port, timeout=timeout, httpsRedirect=httpsRedirect) |
def equivalent_sites(self, scaled_positions, ondublicates='error',
symprec=1e-3):
"""Returns the scaled positions and all their equivalent sites.
Parameters:
scaled_positions: list | array
List of non-equivalent sites given in unit cell coordinates.
ondublicates : 'keep' | 'replace' | 'warn' | 'error'
Action if `scaled_positions` contain symmetry-equivalent
positions:
'keep'
ignore additional symmetry-equivalent positions
'replace'
replace
'warn'
like 'keep', but issue an UserWarning
'error'
raises a SpacegroupValueError
symprec: float
Minimum "distance" betweed two sites in scaled coordinates
before they are counted as the same site.
Returns:
sites: array
A NumPy array of equivalent sites.
kinds: list
A list of integer indices specifying which input site is
equivalent to the corresponding returned site.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sites, kinds = sg.equivalent_sites([[0, 0, 0], [0.5, 0.0, 0.0]])
>>> sites
array([[ 0. , 0. , 0. ],
[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ],
[ 0.5, 0. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 0.5],
[ 0.5, 0.5, 0.5]])
>>> kinds
[0, 0, 0, 0, 1, 1, 1, 1]
"""
kinds = []
sites = []
symprec2 = symprec**2
scaled = np.array(scaled_positions, ndmin=2)
for kind, pos in enumerate(scaled):
for rot, trans in self.get_symop():
site = np.mod(np.dot(rot, pos) + trans, 1.)
if not sites:
sites.append(site)
kinds.append(kind)
continue
t = site - sites
mask = np.sum(t*t, 1) < symprec2
if np.any(mask):
ind = np.argwhere(mask)[0][0]
if kinds[ind] == kind:
pass
elif ondublicates == 'keep':
pass
elif ondublicates == 'replace':
kinds[ind] = kind
elif ondublicates == 'warn':
warnings.warn('scaled_positions %d and %d '
'are equivalent'%(kinds[ind], kind))
elif ondublicates == 'error':
raise SpacegroupValueError(
'scaled_positions %d and %d are equivalent'%(
kinds[ind], kind))
else:
raise SpacegroupValueError(
'Argument "ondublicates" must be one of: '
'"keep", "replace", "warn" or "error".')
else:
sites.append(site)
kinds.append(kind)
return np.array(sites), kinds | Returns the scaled positions and all their equivalent sites.
Parameters:
scaled_positions: list | array
List of non-equivalent sites given in unit cell coordinates.
ondublicates : 'keep' | 'replace' | 'warn' | 'error'
Action if `scaled_positions` contain symmetry-equivalent
positions:
'keep'
ignore additional symmetry-equivalent positions
'replace'
replace
'warn'
like 'keep', but issue an UserWarning
'error'
raises a SpacegroupValueError
symprec: float
Minimum "distance" betweed two sites in scaled coordinates
before they are counted as the same site.
Returns:
sites: array
A NumPy array of equivalent sites.
kinds: list
A list of integer indices specifying which input site is
equivalent to the corresponding returned site.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sites, kinds = sg.equivalent_sites([[0, 0, 0], [0.5, 0.0, 0.0]])
>>> sites
array([[ 0. , 0. , 0. ],
[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ],
[ 0.5, 0. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 0.5],
[ 0.5, 0.5, 0.5]])
>>> kinds
[0, 0, 0, 0, 1, 1, 1, 1] | Below is the the instruction that describes the task:
### Input:
Returns the scaled positions and all their equivalent sites.
Parameters:
scaled_positions: list | array
List of non-equivalent sites given in unit cell coordinates.
ondublicates : 'keep' | 'replace' | 'warn' | 'error'
Action if `scaled_positions` contain symmetry-equivalent
positions:
'keep'
ignore additional symmetry-equivalent positions
'replace'
replace
'warn'
like 'keep', but issue an UserWarning
'error'
raises a SpacegroupValueError
symprec: float
Minimum "distance" betweed two sites in scaled coordinates
before they are counted as the same site.
Returns:
sites: array
A NumPy array of equivalent sites.
kinds: list
A list of integer indices specifying which input site is
equivalent to the corresponding returned site.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sites, kinds = sg.equivalent_sites([[0, 0, 0], [0.5, 0.0, 0.0]])
>>> sites
array([[ 0. , 0. , 0. ],
[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ],
[ 0.5, 0. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 0.5],
[ 0.5, 0.5, 0.5]])
>>> kinds
[0, 0, 0, 0, 1, 1, 1, 1]
### Response:
def equivalent_sites(self, scaled_positions, ondublicates='error',
symprec=1e-3):
"""Returns the scaled positions and all their equivalent sites.
Parameters:
scaled_positions: list | array
List of non-equivalent sites given in unit cell coordinates.
ondublicates : 'keep' | 'replace' | 'warn' | 'error'
Action if `scaled_positions` contain symmetry-equivalent
positions:
'keep'
ignore additional symmetry-equivalent positions
'replace'
replace
'warn'
like 'keep', but issue an UserWarning
'error'
raises a SpacegroupValueError
symprec: float
Minimum "distance" betweed two sites in scaled coordinates
before they are counted as the same site.
Returns:
sites: array
A NumPy array of equivalent sites.
kinds: list
A list of integer indices specifying which input site is
equivalent to the corresponding returned site.
Example:
>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225) # fcc
>>> sites, kinds = sg.equivalent_sites([[0, 0, 0], [0.5, 0.0, 0.0]])
>>> sites
array([[ 0. , 0. , 0. ],
[ 0. , 0.5, 0.5],
[ 0.5, 0. , 0.5],
[ 0.5, 0.5, 0. ],
[ 0.5, 0. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 0.5],
[ 0.5, 0.5, 0.5]])
>>> kinds
[0, 0, 0, 0, 1, 1, 1, 1]
"""
kinds = []
sites = []
symprec2 = symprec**2
scaled = np.array(scaled_positions, ndmin=2)
for kind, pos in enumerate(scaled):
for rot, trans in self.get_symop():
site = np.mod(np.dot(rot, pos) + trans, 1.)
if not sites:
sites.append(site)
kinds.append(kind)
continue
t = site - sites
mask = np.sum(t*t, 1) < symprec2
if np.any(mask):
ind = np.argwhere(mask)[0][0]
if kinds[ind] == kind:
pass
elif ondublicates == 'keep':
pass
elif ondublicates == 'replace':
kinds[ind] = kind
elif ondublicates == 'warn':
warnings.warn('scaled_positions %d and %d '
'are equivalent'%(kinds[ind], kind))
elif ondublicates == 'error':
raise SpacegroupValueError(
'scaled_positions %d and %d are equivalent'%(
kinds[ind], kind))
else:
raise SpacegroupValueError(
'Argument "ondublicates" must be one of: '
'"keep", "replace", "warn" or "error".')
else:
sites.append(site)
kinds.append(kind)
return np.array(sites), kinds |
def apiv1_root_view():
"""
API root url. Shows a list of active endpoints.
"""
docs_url = current_app.config.get('DOCS_URL',
'http://' + request.host + '/docs')
message = "Welcome to the voeventdb REST API, " \
"interface version '{}'.".format(
apiv1.name)
api_details = {
'message': message,
'api_version': apiv1.name,
'git_sha': package_version_dict['full-revisionid'][:8],
'version_tag': package_version_dict['version'],
'endpoints': [str(r) for r in get_apiv1_rules()],
'docs_url': docs_url
}
if 'text/html' in request.headers.get("Accept", ""):
return render_template('landing.html',
**api_details
)
else:
return jsonify(api_details) | API root url. Shows a list of active endpoints. | Below is the the instruction that describes the task:
### Input:
API root url. Shows a list of active endpoints.
### Response:
def apiv1_root_view():
"""
API root url. Shows a list of active endpoints.
"""
docs_url = current_app.config.get('DOCS_URL',
'http://' + request.host + '/docs')
message = "Welcome to the voeventdb REST API, " \
"interface version '{}'.".format(
apiv1.name)
api_details = {
'message': message,
'api_version': apiv1.name,
'git_sha': package_version_dict['full-revisionid'][:8],
'version_tag': package_version_dict['version'],
'endpoints': [str(r) for r in get_apiv1_rules()],
'docs_url': docs_url
}
if 'text/html' in request.headers.get("Accept", ""):
return render_template('landing.html',
**api_details
)
else:
return jsonify(api_details) |
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if not label is None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n') | Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool`` | Below is the the instruction that describes the task:
### Input:
Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
### Response:
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if not label is None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n') |
def get_namespaces(namespace="", apiserver_url=None):
'''
.. versionadded:: 2016.3.0
Get one or all kubernetes namespaces.
If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example:
.. code-block:: bash
kubectl get namespaces -o json
In case namespace is set by user, the output will be similar to the one from kubectl:
.. code-block:: bash
kubectl get namespaces namespace_name -o json
CLI Example:
.. code-block:: bash
salt '*' k8s.get_namespaces
salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local
'''
# Try to get kubernetes master
apiserver_url = _guess_apiserver(apiserver_url)
if apiserver_url is None:
return False
# Get data
ret = _get_namespaces(apiserver_url, namespace)
return ret | .. versionadded:: 2016.3.0
Get one or all kubernetes namespaces.
If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example:
.. code-block:: bash
kubectl get namespaces -o json
In case namespace is set by user, the output will be similar to the one from kubectl:
.. code-block:: bash
kubectl get namespaces namespace_name -o json
CLI Example:
.. code-block:: bash
salt '*' k8s.get_namespaces
salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2016.3.0
Get one or all kubernetes namespaces.
If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example:
.. code-block:: bash
kubectl get namespaces -o json
In case namespace is set by user, the output will be similar to the one from kubectl:
.. code-block:: bash
kubectl get namespaces namespace_name -o json
CLI Example:
.. code-block:: bash
salt '*' k8s.get_namespaces
salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local
### Response:
def get_namespaces(namespace="", apiserver_url=None):
'''
.. versionadded:: 2016.3.0
Get one or all kubernetes namespaces.
If namespace parameter is omitted, all namespaces will be returned back to user, similar to following kubectl example:
.. code-block:: bash
kubectl get namespaces -o json
In case namespace is set by user, the output will be similar to the one from kubectl:
.. code-block:: bash
kubectl get namespaces namespace_name -o json
CLI Example:
.. code-block:: bash
salt '*' k8s.get_namespaces
salt '*' k8s.get_namespaces namespace_name http://kube-master.cluster.local
'''
# Try to get kubernetes master
apiserver_url = _guess_apiserver(apiserver_url)
if apiserver_url is None:
return False
# Get data
ret = _get_namespaces(apiserver_url, namespace)
return ret |
def count(self):
"""
Returns the number of widgets currently displayed (takes child splits
into account).
"""
c = self.main_tab_widget.count()
for child in self.child_splitters:
c += child.count()
return c | Returns the number of widgets currently displayed (takes child splits
into account). | Below is the the instruction that describes the task:
### Input:
Returns the number of widgets currently displayed (takes child splits
into account).
### Response:
def count(self):
"""
Returns the number of widgets currently displayed (takes child splits
into account).
"""
c = self.main_tab_widget.count()
for child in self.child_splitters:
c += child.count()
return c |
def send(self, request, headers=None, content=None, **kwargs):
"""Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
"""
# "content" and "headers" are deprecated, only old SDK
if headers:
request.headers.update(headers)
if not request.files and request.data is None and content is not None:
request.add_content(content)
# End of deprecation
response = None
kwargs.setdefault('stream', True)
try:
pipeline_response = self.config.pipeline.run(request, **kwargs)
# There is too much thing that expects this method to return a "requests.Response"
# to break it in a compatible release.
# Also, to be pragmatic in the "sync" world "requests" rules anyway.
# However, attach the Universal HTTP response
# to get the streaming generator.
response = pipeline_response.http_response.internal_response
response._universal_http_response = pipeline_response.http_response
response.context = pipeline_response.context
return response
finally:
self._close_local_session_if_necessary(response, kwargs['stream']) | Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides | Below is the the instruction that describes the task:
### Input:
Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
### Response:
def send(self, request, headers=None, content=None, **kwargs):
"""Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
"""
# "content" and "headers" are deprecated, only old SDK
if headers:
request.headers.update(headers)
if not request.files and request.data is None and content is not None:
request.add_content(content)
# End of deprecation
response = None
kwargs.setdefault('stream', True)
try:
pipeline_response = self.config.pipeline.run(request, **kwargs)
# There is too much thing that expects this method to return a "requests.Response"
# to break it in a compatible release.
# Also, to be pragmatic in the "sync" world "requests" rules anyway.
# However, attach the Universal HTTP response
# to get the streaming generator.
response = pipeline_response.http_response.internal_response
response._universal_http_response = pipeline_response.http_response
response.context = pipeline_response.context
return response
finally:
self._close_local_session_if_necessary(response, kwargs['stream']) |
def get_book(self):
"""Gets the ``Book`` at this node.
return: (osid.commenting.Book) - the book represented by this
node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('COMMENTING', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_book_lookup_session(proxy=getattr(self, "_proxy", None))
return self._lookup_session.get_book(Id(self._my_map['id'])) | Gets the ``Book`` at this node.
return: (osid.commenting.Book) - the book represented by this
node
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the ``Book`` at this node.
return: (osid.commenting.Book) - the book represented by this
node
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_book(self):
"""Gets the ``Book`` at this node.
return: (osid.commenting.Book) - the book represented by this
node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('COMMENTING', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_book_lookup_session(proxy=getattr(self, "_proxy", None))
return self._lookup_session.get_book(Id(self._my_map['id'])) |
def _apply_odf_properties(df, headers, model):
"""
Attach properties to the Dataframe to carry along ODF metadata
:param df: The dataframe to be modified
:param headers: The ODF header lines
:param model: The ODF model type
"""
df.headers = headers
df.model = model | Attach properties to the Dataframe to carry along ODF metadata
:param df: The dataframe to be modified
:param headers: The ODF header lines
:param model: The ODF model type | Below is the the instruction that describes the task:
### Input:
Attach properties to the Dataframe to carry along ODF metadata
:param df: The dataframe to be modified
:param headers: The ODF header lines
:param model: The ODF model type
### Response:
def _apply_odf_properties(df, headers, model):
"""
Attach properties to the Dataframe to carry along ODF metadata
:param df: The dataframe to be modified
:param headers: The ODF header lines
:param model: The ODF model type
"""
df.headers = headers
df.model = model |
def kick(self, member, reason=None):
"""
Kick an occupant from the MUC.
:param member: The member to kick.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the kicked member.
:type reason: :class:`str`
:raises aioxmpp.errors.XMPPError: if the server returned an error for
the kick command.
.. seealso::
:meth:`.AbstractConversation.kick` for the full interface
specification.
"""
yield from self.muc_set_role(
member.nick,
"none",
reason=reason
) | Kick an occupant from the MUC.
:param member: The member to kick.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the kicked member.
:type reason: :class:`str`
:raises aioxmpp.errors.XMPPError: if the server returned an error for
the kick command.
.. seealso::
:meth:`.AbstractConversation.kick` for the full interface
specification. | Below is the the instruction that describes the task:
### Input:
Kick an occupant from the MUC.
:param member: The member to kick.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the kicked member.
:type reason: :class:`str`
:raises aioxmpp.errors.XMPPError: if the server returned an error for
the kick command.
.. seealso::
:meth:`.AbstractConversation.kick` for the full interface
specification.
### Response:
def kick(self, member, reason=None):
"""
Kick an occupant from the MUC.
:param member: The member to kick.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the kicked member.
:type reason: :class:`str`
:raises aioxmpp.errors.XMPPError: if the server returned an error for
the kick command.
.. seealso::
:meth:`.AbstractConversation.kick` for the full interface
specification.
"""
yield from self.muc_set_role(
member.nick,
"none",
reason=reason
) |
def get(self, index):
"""Gets data values for specified :index:.
:index: Index for which to get data.
:returns: A list in form
[parent, name, priority, comment, done, children].
"""
data = self.data
index2 = self._split(index)
for c in index2[:-1]:
i = int(c) - 1
data = data[i][4]
return [index[:-2] or ""] + data[int(index[-1]) - 1] | Gets data values for specified :index:.
:index: Index for which to get data.
:returns: A list in form
[parent, name, priority, comment, done, children]. | Below is the the instruction that describes the task:
### Input:
Gets data values for specified :index:.
:index: Index for which to get data.
:returns: A list in form
[parent, name, priority, comment, done, children].
### Response:
def get(self, index):
"""Gets data values for specified :index:.
:index: Index for which to get data.
:returns: A list in form
[parent, name, priority, comment, done, children].
"""
data = self.data
index2 = self._split(index)
for c in index2[:-1]:
i = int(c) - 1
data = data[i][4]
return [index[:-2] or ""] + data[int(index[-1]) - 1] |
def update_configs(self, release):
""" Update the fedora-atomic.git repositories for a given release """
git_repo = release['git_repo']
git_cache = release['git_cache']
if not os.path.isdir(git_cache):
self.call(['git', 'clone', '--mirror', git_repo, git_cache])
else:
self.call(['git', 'fetch', '--all', '--prune'], cwd=git_cache)
git_dir = release['git_dir'] = os.path.join(release['tmp_dir'],
os.path.basename(git_repo))
self.call(['git', 'clone', '-b', release['git_branch'],
git_cache, git_dir])
if release['delete_repo_files']:
for repo_file in glob.glob(os.path.join(git_dir, '*.repo')):
self.log.info('Deleting %s' % repo_file)
os.unlink(repo_file) | Update the fedora-atomic.git repositories for a given release | Below is the the instruction that describes the task:
### Input:
Update the fedora-atomic.git repositories for a given release
### Response:
def update_configs(self, release):
""" Update the fedora-atomic.git repositories for a given release """
git_repo = release['git_repo']
git_cache = release['git_cache']
if not os.path.isdir(git_cache):
self.call(['git', 'clone', '--mirror', git_repo, git_cache])
else:
self.call(['git', 'fetch', '--all', '--prune'], cwd=git_cache)
git_dir = release['git_dir'] = os.path.join(release['tmp_dir'],
os.path.basename(git_repo))
self.call(['git', 'clone', '-b', release['git_branch'],
git_cache, git_dir])
if release['delete_repo_files']:
for repo_file in glob.glob(os.path.join(git_dir, '*.repo')):
self.log.info('Deleting %s' % repo_file)
os.unlink(repo_file) |
def get_changes(self, extracted_name, similar=False, global_=False):
"""Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global.
"""
info = _ExtractInfo(
self.project, self.resource, self.start_offset, self.end_offset,
extracted_name, variable=self.kind == 'variable',
similar=similar, make_global=global_)
new_contents = _ExtractPerformer(info).extract()
changes = ChangeSet('Extract %s <%s>' % (self.kind,
extracted_name))
changes.add_change(ChangeContents(self.resource, new_contents))
return changes | Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global. | Below is the the instruction that describes the task:
### Input:
Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global.
### Response:
def get_changes(self, extracted_name, similar=False, global_=False):
"""Get the changes this refactoring makes
:parameters:
- `similar`: if `True`, similar expressions/statements are also
replaced.
- `global_`: if `True`, the extracted method/variable will
be global.
"""
info = _ExtractInfo(
self.project, self.resource, self.start_offset, self.end_offset,
extracted_name, variable=self.kind == 'variable',
similar=similar, make_global=global_)
new_contents = _ExtractPerformer(info).extract()
changes = ChangeSet('Extract %s <%s>' % (self.kind,
extracted_name))
changes.add_change(ChangeContents(self.resource, new_contents))
return changes |
def _list_libraries_cached(self, newer_than_secs=-1):
"""
Returns
-------
List of Arctic library names from a cached collection (global per mongo cluster) in mongo.
Long term list_libraries should have a use_cached argument.
"""
_ = self._conn # Ensures the connection exists and cache is initialized with it.
cache_data = self._cache.get('list_libraries', newer_than_secs)
if cache_data:
logger.debug('Library names are in cache.')
return cache_data
return self._list_libraries() | Returns
-------
List of Arctic library names from a cached collection (global per mongo cluster) in mongo.
Long term list_libraries should have a use_cached argument. | Below is the the instruction that describes the task:
### Input:
Returns
-------
List of Arctic library names from a cached collection (global per mongo cluster) in mongo.
Long term list_libraries should have a use_cached argument.
### Response:
def _list_libraries_cached(self, newer_than_secs=-1):
"""
Returns
-------
List of Arctic library names from a cached collection (global per mongo cluster) in mongo.
Long term list_libraries should have a use_cached argument.
"""
_ = self._conn # Ensures the connection exists and cache is initialized with it.
cache_data = self._cache.get('list_libraries', newer_than_secs)
if cache_data:
logger.debug('Library names are in cache.')
return cache_data
return self._list_libraries() |
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
cross-reference.
If no resolution can be found, None can be returned; the xref node will
then given to the :event:`missing-reference` event, and if that yields no
resolution, replaced by *contnode*.
The method can also raise :exc:`sphinx.environment.NoUri` to suppress
the :event:`missing-reference` event being emitted.
"""
for fullname, (docname, objtype) in self.data['objects'].items():
if fullname.name == target:
return make_refnode(builder, fromdocname, docname, fullname2id(fullname), contnode, fullname.name)
return None | Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
cross-reference.
If no resolution can be found, None can be returned; the xref node will
then given to the :event:`missing-reference` event, and if that yields no
resolution, replaced by *contnode*.
The method can also raise :exc:`sphinx.environment.NoUri` to suppress
the :event:`missing-reference` event being emitted. | Below is the the instruction that describes the task:
### Input:
Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
cross-reference.
If no resolution can be found, None can be returned; the xref node will
then given to the :event:`missing-reference` event, and if that yields no
resolution, replaced by *contnode*.
The method can also raise :exc:`sphinx.environment.NoUri` to suppress
the :event:`missing-reference` event being emitted.
### Response:
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
cross-reference.
If no resolution can be found, None can be returned; the xref node will
then given to the :event:`missing-reference` event, and if that yields no
resolution, replaced by *contnode*.
The method can also raise :exc:`sphinx.environment.NoUri` to suppress
the :event:`missing-reference` event being emitted.
"""
for fullname, (docname, objtype) in self.data['objects'].items():
if fullname.name == target:
return make_refnode(builder, fromdocname, docname, fullname2id(fullname), contnode, fullname.name)
return None |
def set_contents(self, stream, chunk_size=None, size=None, size_limit=None,
progress_callback=None):
"""Save contents of stream to file instance.
If a file instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value.
"""
if size_limit is None:
size_limit = self.bucket.size_limit
self.file = FileInstance.create()
self.file.set_contents(
stream, size_limit=size_limit, size=size, chunk_size=chunk_size,
progress_callback=progress_callback,
default_location=self.bucket.location.uri,
default_storage_class=self.bucket.default_storage_class,
)
return self | Save contents of stream to file instance.
If a file instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value. | Below is the the instruction that describes the task:
### Input:
Save contents of stream to file instance.
If a file instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value.
### Response:
def set_contents(self, stream, chunk_size=None, size=None, size_limit=None,
progress_callback=None):
"""Save contents of stream to file instance.
If a file instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value.
"""
if size_limit is None:
size_limit = self.bucket.size_limit
self.file = FileInstance.create()
self.file.set_contents(
stream, size_limit=size_limit, size=size, chunk_size=chunk_size,
progress_callback=progress_callback,
default_location=self.bucket.location.uri,
default_storage_class=self.bucket.default_storage_class,
)
return self |
def launch(self,
argv=None,
showusageonnoargs=False,
width=0,
helphint="Use with --help for more information.\n",
debug_parser=False):
"""Do the usual stuff to initiallize the program.
Read config files and parse arguments, and if the user has used any
of the help/version/settings options, display help and exit.
If debug_parser is false, don't catch ParseErrors and exit with user
friendly help. Crash with traceback instead.
configfiles is a list of config files. None means use self.configfiles.
sections is a list of configfile section names to read. None means use
self.sections.
argv is a list of arguments to parse. Will be modified. None means use
copy of sys.argv. argv[0] is ignored.
If showusageonnoargs is true, show usage and exit if the user didn't
give any args. Should be False if there are no required PositionalArgs.
width is the maximum allowed page width. 0 means use self.width.
helphint is a string that hints on how to get more help which is
displayed at the end of usage help messages.
"""
if showusageonnoargs and len(argv) == 1:
print self.shorthelp(width=width)
if helphint:
print self._wrap(helphint, indent=2, width=width)
sys.exit(0)
parsing_error = None
try:
self.parse_files()
self.parse_argv(argv)
except ParseError, parsing_error:
if debug_parser:
raise
for optiontype in ['help', 'longhelp', 'settings', 'version']:
name = self.basic_option_names.get(optiontype)
if name and self[name]:
methodname = optiontype.rstrip('help') + 'help'
print getattr(self, methodname)(width)
sys.exit()
if parsing_error:
self.graceful_exit(parsing_error, width) | Do the usual stuff to initiallize the program.
Read config files and parse arguments, and if the user has used any
of the help/version/settings options, display help and exit.
If debug_parser is false, don't catch ParseErrors and exit with user
friendly help. Crash with traceback instead.
configfiles is a list of config files. None means use self.configfiles.
sections is a list of configfile section names to read. None means use
self.sections.
argv is a list of arguments to parse. Will be modified. None means use
copy of sys.argv. argv[0] is ignored.
If showusageonnoargs is true, show usage and exit if the user didn't
give any args. Should be False if there are no required PositionalArgs.
width is the maximum allowed page width. 0 means use self.width.
helphint is a string that hints on how to get more help which is
displayed at the end of usage help messages. | Below is the the instruction that describes the task:
### Input:
Do the usual stuff to initiallize the program.
Read config files and parse arguments, and if the user has used any
of the help/version/settings options, display help and exit.
If debug_parser is false, don't catch ParseErrors and exit with user
friendly help. Crash with traceback instead.
configfiles is a list of config files. None means use self.configfiles.
sections is a list of configfile section names to read. None means use
self.sections.
argv is a list of arguments to parse. Will be modified. None means use
copy of sys.argv. argv[0] is ignored.
If showusageonnoargs is true, show usage and exit if the user didn't
give any args. Should be False if there are no required PositionalArgs.
width is the maximum allowed page width. 0 means use self.width.
helphint is a string that hints on how to get more help which is
displayed at the end of usage help messages.
### Response:
def launch(self,
argv=None,
showusageonnoargs=False,
width=0,
helphint="Use with --help for more information.\n",
debug_parser=False):
"""Do the usual stuff to initiallize the program.
Read config files and parse arguments, and if the user has used any
of the help/version/settings options, display help and exit.
If debug_parser is false, don't catch ParseErrors and exit with user
friendly help. Crash with traceback instead.
configfiles is a list of config files. None means use self.configfiles.
sections is a list of configfile section names to read. None means use
self.sections.
argv is a list of arguments to parse. Will be modified. None means use
copy of sys.argv. argv[0] is ignored.
If showusageonnoargs is true, show usage and exit if the user didn't
give any args. Should be False if there are no required PositionalArgs.
width is the maximum allowed page width. 0 means use self.width.
helphint is a string that hints on how to get more help which is
displayed at the end of usage help messages.
"""
if showusageonnoargs and len(argv) == 1:
print self.shorthelp(width=width)
if helphint:
print self._wrap(helphint, indent=2, width=width)
sys.exit(0)
parsing_error = None
try:
self.parse_files()
self.parse_argv(argv)
except ParseError, parsing_error:
if debug_parser:
raise
for optiontype in ['help', 'longhelp', 'settings', 'version']:
name = self.basic_option_names.get(optiontype)
if name and self[name]:
methodname = optiontype.rstrip('help') + 'help'
print getattr(self, methodname)(width)
sys.exit()
if parsing_error:
self.graceful_exit(parsing_error, width) |
def get_time_as_string(self):
"""stub"""
if self.has_time():
return (str(self.time['hours']).zfill(2) + ':' +
str(self.time['minutes']).zfill(2) + ':' +
str(self.time['seconds']).zfill(2))
raise IllegalState() | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def get_time_as_string(self):
"""stub"""
if self.has_time():
return (str(self.time['hours']).zfill(2) + ':' +
str(self.time['minutes']).zfill(2) + ':' +
str(self.time['seconds']).zfill(2))
raise IllegalState() |
def filelist(folderpath, ext=None):
'''
Returns a list of all the files contained in the folder specified by `folderpath`.
To filter the files by extension simply add a list containing all the extension with `.` as the second argument.
If `flat` is False, then the Path objects are returned.
'''
if not ext:
ext = []
if os.path.exists(folderpath) and os.path.isdir(folderpath):
return [ os.path.join(folderpath, f) for f in os.listdir(folderpath) if os.path.isfile(os.path.join(folderpath, f)) and os.path.splitext(f)[1] in ext ]
else:
log.warn('"{}" does not exist or is not a directory'.format(folderpath)) | Returns a list of all the files contained in the folder specified by `folderpath`.
To filter the files by extension simply add a list containing all the extension with `.` as the second argument.
If `flat` is False, then the Path objects are returned. | Below is the the instruction that describes the task:
### Input:
Returns a list of all the files contained in the folder specified by `folderpath`.
To filter the files by extension simply add a list containing all the extension with `.` as the second argument.
If `flat` is False, then the Path objects are returned.
### Response:
def filelist(folderpath, ext=None):
'''
Returns a list of all the files contained in the folder specified by `folderpath`.
To filter the files by extension simply add a list containing all the extension with `.` as the second argument.
If `flat` is False, then the Path objects are returned.
'''
if not ext:
ext = []
if os.path.exists(folderpath) and os.path.isdir(folderpath):
return [ os.path.join(folderpath, f) for f in os.listdir(folderpath) if os.path.isfile(os.path.join(folderpath, f)) and os.path.splitext(f)[1] in ext ]
else:
log.warn('"{}" does not exist or is not a directory'.format(folderpath)) |
def _get_id2children(id2children, item_id, item_obj):
"""Add the child item IDs for one item object and their children."""
if item_id in id2children:
return id2children[item_id]
child_ids = set()
for child_obj in item_obj.children:
child_id = child_obj.item_id
child_ids.add(child_id)
child_ids |= _get_id2children(id2children, child_id, child_obj)
id2children[item_id] = child_ids
return child_ids | Add the child item IDs for one item object and their children. | Below is the the instruction that describes the task:
### Input:
Add the child item IDs for one item object and their children.
### Response:
def _get_id2children(id2children, item_id, item_obj):
"""Add the child item IDs for one item object and their children."""
if item_id in id2children:
return id2children[item_id]
child_ids = set()
for child_obj in item_obj.children:
child_id = child_obj.item_id
child_ids.add(child_id)
child_ids |= _get_id2children(id2children, child_id, child_obj)
id2children[item_id] = child_ids
return child_ids |
def create_user_pool(PoolName=None, Policies=None, LambdaConfig=None, AutoVerifiedAttributes=None, AliasAttributes=None, SmsVerificationMessage=None, EmailVerificationMessage=None, EmailVerificationSubject=None, SmsAuthenticationMessage=None, MfaConfiguration=None, DeviceConfiguration=None, EmailConfiguration=None, SmsConfiguration=None, UserPoolTags=None, AdminCreateUserConfig=None, Schema=None):
"""
Creates a new Amazon Cognito user pool and sets the password policy for the pool.
See also: AWS API Documentation
:example: response = client.create_user_pool(
PoolName='string',
Policies={
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
LambdaConfig={
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
AutoVerifiedAttributes=[
'phone_number'|'email',
],
AliasAttributes=[
'phone_number'|'email'|'preferred_username',
],
SmsVerificationMessage='string',
EmailVerificationMessage='string',
EmailVerificationSubject='string',
SmsAuthenticationMessage='string',
MfaConfiguration='OFF'|'ON'|'OPTIONAL',
DeviceConfiguration={
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
EmailConfiguration={
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
SmsConfiguration={
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
UserPoolTags={
'string': 'string'
},
AdminCreateUserConfig={
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
},
Schema=[
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
]
)
:type PoolName: string
:param PoolName: [REQUIRED]
A string used to name the user pool.
:type Policies: dict
:param Policies: The policies associated with the new user pool.
PasswordPolicy (dict) --A container for information about the user pool password policy.
MinimumLength (integer) --The minimum length of the password policy that you have set. Cannot be less than 6.
RequireUppercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.
RequireLowercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.
RequireNumbers (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one number in their password.
RequireSymbols (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.
:type LambdaConfig: dict
:param LambdaConfig: The Lambda trigger configuration information for the new user pool.
PreSignUp (string) --A pre-registration AWS Lambda trigger.
CustomMessage (string) --A custom Message AWS Lambda trigger.
PostConfirmation (string) --A post-confirmation AWS Lambda trigger.
PreAuthentication (string) --A pre-authentication AWS Lambda trigger.
PostAuthentication (string) --A post-authentication AWS Lambda trigger.
DefineAuthChallenge (string) --Defines the authentication challenge.
CreateAuthChallenge (string) --Creates an authentication challenge.
VerifyAuthChallengeResponse (string) --Verifies the authentication challenge response.
:type AutoVerifiedAttributes: list
:param AutoVerifiedAttributes: The attributes to be auto-verified. Possible values: email , phone_number .
(string) --
:type AliasAttributes: list
:param AliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number , email , or preferred_username .
(string) --
:type SmsVerificationMessage: string
:param SmsVerificationMessage: A string representing the SMS verification message.
:type EmailVerificationMessage: string
:param EmailVerificationMessage: A string representing the email verification message.
:type EmailVerificationSubject: string
:param EmailVerificationSubject: A string representing the email verification subject.
:type SmsAuthenticationMessage: string
:param SmsAuthenticationMessage: A string representing the SMS authentication message.
:type MfaConfiguration: string
:param MfaConfiguration: Specifies MFA configuration details.
:type DeviceConfiguration: dict
:param DeviceConfiguration: The device configuration.
ChallengeRequiredOnNewDevice (boolean) --Indicates whether a challenge is required on a new device. Only applicable to a new device.
DeviceOnlyRememberedOnUserPrompt (boolean) --If true, a device is only remembered on user prompt.
:type EmailConfiguration: dict
:param EmailConfiguration: The email configuration.
SourceArn (string) --The Amazon Resource Name (ARN) of the email source.
ReplyToEmailAddress (string) --The REPLY-TO email address.
:type SmsConfiguration: dict
:param SmsConfiguration: The SMS configuration.
SnsCallerArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller.
ExternalId (string) --The external ID.
:type UserPoolTags: dict
:param UserPoolTags: The cost allocation tags for the user pool. For more information, see Adding Cost Allocation Tags to Your User Pool
(string) --
(string) --
:type AdminCreateUserConfig: dict
:param AdminCreateUserConfig: The configuration for AdminCreateUser requests.
AllowAdminCreateUserOnly (boolean) --Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app.
UnusedAccountValidityDays (integer) --The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call AdminCreateUser again, specifying 'RESEND' for the MessageAction parameter. The default value for this parameter is 7.
InviteMessageTemplate (dict) --The message template to be used for the welcome message to new users.
SMSMessage (string) --The message template for SMS messages.
EmailMessage (string) --The message template for email messages.
EmailSubject (string) --The subject line for email messages.
:type Schema: list
:param Schema: An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.
(dict) --Contains information about the schema attribute.
Name (string) --A schema attribute of the name type.
AttributeDataType (string) --The attribute data type.
DeveloperOnlyAttribute (boolean) --Specifies whether the attribute type is developer only.
Mutable (boolean) --Specifies whether the attribute can be changed once it has been created.
Required (boolean) --Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
NumberAttributeConstraints (dict) --Specifies the constraints for an attribute of the number type.
MinValue (string) --The minimum value of an attribute that is of the number data type.
MaxValue (string) --The maximum value of an attribute that is of the number data type.
StringAttributeConstraints (dict) --Specifies the constraints for an attribute of the string type.
MinLength (string) --The minimum length of an attribute value of the string type.
MaxLength (string) --The maximum length of an attribute value of the string type.
:rtype: dict
:return: {
'UserPool': {
'Id': 'string',
'Name': 'string',
'Policies': {
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
'LambdaConfig': {
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
'Status': 'Enabled'|'Disabled',
'LastModifiedDate': datetime(2015, 1, 1),
'CreationDate': datetime(2015, 1, 1),
'SchemaAttributes': [
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
],
'AutoVerifiedAttributes': [
'phone_number'|'email',
],
'AliasAttributes': [
'phone_number'|'email'|'preferred_username',
],
'SmsVerificationMessage': 'string',
'EmailVerificationMessage': 'string',
'EmailVerificationSubject': 'string',
'SmsAuthenticationMessage': 'string',
'MfaConfiguration': 'OFF'|'ON'|'OPTIONAL',
'DeviceConfiguration': {
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
'EstimatedNumberOfUsers': 123,
'EmailConfiguration': {
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
'SmsConfiguration': {
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
'UserPoolTags': {
'string': 'string'
},
'SmsConfigurationFailure': 'string',
'EmailConfigurationFailure': 'string',
'AdminCreateUserConfig': {
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
}
}
}
:returns:
(string) --
"""
# NOTE(review): documentation-only stub -- presumably auto-generated from the
# botocore service model; the body is intentionally empty because the real
# request would be dispatched dynamically by the boto3 client. Verify.
pass | Creates a new Amazon Cognito user pool and sets the password policy for the pool.
See also: AWS API Documentation
:example: response = client.create_user_pool(
PoolName='string',
Policies={
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
LambdaConfig={
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
AutoVerifiedAttributes=[
'phone_number'|'email',
],
AliasAttributes=[
'phone_number'|'email'|'preferred_username',
],
SmsVerificationMessage='string',
EmailVerificationMessage='string',
EmailVerificationSubject='string',
SmsAuthenticationMessage='string',
MfaConfiguration='OFF'|'ON'|'OPTIONAL',
DeviceConfiguration={
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
EmailConfiguration={
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
SmsConfiguration={
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
UserPoolTags={
'string': 'string'
},
AdminCreateUserConfig={
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
},
Schema=[
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
]
)
:type PoolName: string
:param PoolName: [REQUIRED]
A string used to name the user pool.
:type Policies: dict
:param Policies: The policies associated with the new user pool.
PasswordPolicy (dict) --A container for information about the user pool password policy.
MinimumLength (integer) --The minimum length of the password policy that you have set. Cannot be less than 6.
RequireUppercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.
RequireLowercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.
RequireNumbers (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one number in their password.
RequireSymbols (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.
:type LambdaConfig: dict
:param LambdaConfig: The Lambda trigger configuration information for the new user pool.
PreSignUp (string) --A pre-registration AWS Lambda trigger.
CustomMessage (string) --A custom Message AWS Lambda trigger.
PostConfirmation (string) --A post-confirmation AWS Lambda trigger.
PreAuthentication (string) --A pre-authentication AWS Lambda trigger.
PostAuthentication (string) --A post-authentication AWS Lambda trigger.
DefineAuthChallenge (string) --Defines the authentication challenge.
CreateAuthChallenge (string) --Creates an authentication challenge.
VerifyAuthChallengeResponse (string) --Verifies the authentication challenge response.
:type AutoVerifiedAttributes: list
:param AutoVerifiedAttributes: The attributes to be auto-verified. Possible values: email , phone_number .
(string) --
:type AliasAttributes: list
:param AliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number , email , or preferred_username .
(string) --
:type SmsVerificationMessage: string
:param SmsVerificationMessage: A string representing the SMS verification message.
:type EmailVerificationMessage: string
:param EmailVerificationMessage: A string representing the email verification message.
:type EmailVerificationSubject: string
:param EmailVerificationSubject: A string representing the email verification subject.
:type SmsAuthenticationMessage: string
:param SmsAuthenticationMessage: A string representing the SMS authentication message.
:type MfaConfiguration: string
:param MfaConfiguration: Specifies MFA configuration details.
:type DeviceConfiguration: dict
:param DeviceConfiguration: The device configuration.
ChallengeRequiredOnNewDevice (boolean) --Indicates whether a challenge is required on a new device. Only applicable to a new device.
DeviceOnlyRememberedOnUserPrompt (boolean) --If true, a device is only remembered on user prompt.
:type EmailConfiguration: dict
:param EmailConfiguration: The email configuration.
SourceArn (string) --The Amazon Resource Name (ARN) of the email source.
ReplyToEmailAddress (string) --The REPLY-TO email address.
:type SmsConfiguration: dict
:param SmsConfiguration: The SMS configuration.
SnsCallerArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller.
ExternalId (string) --The external ID.
:type UserPoolTags: dict
:param UserPoolTags: The cost allocation tags for the user pool. For more information, see Adding Cost Allocation Tags to Your User Pool
(string) --
(string) --
:type AdminCreateUserConfig: dict
:param AdminCreateUserConfig: The configuration for AdminCreateUser requests.
AllowAdminCreateUserOnly (boolean) --Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app.
UnusedAccountValidityDays (integer) --The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call AdminCreateUser again, specifying 'RESEND' for the MessageAction parameter. The default value for this parameter is 7.
InviteMessageTemplate (dict) --The message template to be used for the welcome message to new users.
SMSMessage (string) --The message template for SMS messages.
EmailMessage (string) --The message template for email messages.
EmailSubject (string) --The subject line for email messages.
:type Schema: list
:param Schema: An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.
(dict) --Contains information about the schema attribute.
Name (string) --A schema attribute of the name type.
AttributeDataType (string) --The attribute data type.
DeveloperOnlyAttribute (boolean) --Specifies whether the attribute type is developer only.
Mutable (boolean) --Specifies whether the attribute can be changed once it has been created.
Required (boolean) --Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
NumberAttributeConstraints (dict) --Specifies the constraints for an attribute of the number type.
MinValue (string) --The minimum value of an attribute that is of the number data type.
MaxValue (string) --The maximum value of an attribute that is of the number data type.
StringAttributeConstraints (dict) --Specifies the constraints for an attribute of the string type.
MinLength (string) --The minimum length of an attribute value of the string type.
MaxLength (string) --The maximum length of an attribute value of the string type.
:rtype: dict
:return: {
'UserPool': {
'Id': 'string',
'Name': 'string',
'Policies': {
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
'LambdaConfig': {
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
'Status': 'Enabled'|'Disabled',
'LastModifiedDate': datetime(2015, 1, 1),
'CreationDate': datetime(2015, 1, 1),
'SchemaAttributes': [
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
],
'AutoVerifiedAttributes': [
'phone_number'|'email',
],
'AliasAttributes': [
'phone_number'|'email'|'preferred_username',
],
'SmsVerificationMessage': 'string',
'EmailVerificationMessage': 'string',
'EmailVerificationSubject': 'string',
'SmsAuthenticationMessage': 'string',
'MfaConfiguration': 'OFF'|'ON'|'OPTIONAL',
'DeviceConfiguration': {
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
'EstimatedNumberOfUsers': 123,
'EmailConfiguration': {
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
'SmsConfiguration': {
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
'UserPoolTags': {
'string': 'string'
},
'SmsConfigurationFailure': 'string',
'EmailConfigurationFailure': 'string',
'AdminCreateUserConfig': {
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
}
}
}
:returns:
(string) -- | Below is the instruction that describes the task:
### Input:
Creates a new Amazon Cognito user pool and sets the password policy for the pool.
See also: AWS API Documentation
:example: response = client.create_user_pool(
PoolName='string',
Policies={
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
LambdaConfig={
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
AutoVerifiedAttributes=[
'phone_number'|'email',
],
AliasAttributes=[
'phone_number'|'email'|'preferred_username',
],
SmsVerificationMessage='string',
EmailVerificationMessage='string',
EmailVerificationSubject='string',
SmsAuthenticationMessage='string',
MfaConfiguration='OFF'|'ON'|'OPTIONAL',
DeviceConfiguration={
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
EmailConfiguration={
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
SmsConfiguration={
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
UserPoolTags={
'string': 'string'
},
AdminCreateUserConfig={
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
},
Schema=[
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
]
)
:type PoolName: string
:param PoolName: [REQUIRED]
A string used to name the user pool.
:type Policies: dict
:param Policies: The policies associated with the new user pool.
PasswordPolicy (dict) --A container for information about the user pool password policy.
MinimumLength (integer) --The minimum length of the password policy that you have set. Cannot be less than 6.
RequireUppercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.
RequireLowercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.
RequireNumbers (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one number in their password.
RequireSymbols (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.
:type LambdaConfig: dict
:param LambdaConfig: The Lambda trigger configuration information for the new user pool.
PreSignUp (string) --A pre-registration AWS Lambda trigger.
CustomMessage (string) --A custom Message AWS Lambda trigger.
PostConfirmation (string) --A post-confirmation AWS Lambda trigger.
PreAuthentication (string) --A pre-authentication AWS Lambda trigger.
PostAuthentication (string) --A post-authentication AWS Lambda trigger.
DefineAuthChallenge (string) --Defines the authentication challenge.
CreateAuthChallenge (string) --Creates an authentication challenge.
VerifyAuthChallengeResponse (string) --Verifies the authentication challenge response.
:type AutoVerifiedAttributes: list
:param AutoVerifiedAttributes: The attributes to be auto-verified. Possible values: email , phone_number .
(string) --
:type AliasAttributes: list
:param AliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number , email , or preferred_username .
(string) --
:type SmsVerificationMessage: string
:param SmsVerificationMessage: A string representing the SMS verification message.
:type EmailVerificationMessage: string
:param EmailVerificationMessage: A string representing the email verification message.
:type EmailVerificationSubject: string
:param EmailVerificationSubject: A string representing the email verification subject.
:type SmsAuthenticationMessage: string
:param SmsAuthenticationMessage: A string representing the SMS authentication message.
:type MfaConfiguration: string
:param MfaConfiguration: Specifies MFA configuration details.
:type DeviceConfiguration: dict
:param DeviceConfiguration: The device configuration.
ChallengeRequiredOnNewDevice (boolean) --Indicates whether a challenge is required on a new device. Only applicable to a new device.
DeviceOnlyRememberedOnUserPrompt (boolean) --If true, a device is only remembered on user prompt.
:type EmailConfiguration: dict
:param EmailConfiguration: The email configuration.
SourceArn (string) --The Amazon Resource Name (ARN) of the email source.
ReplyToEmailAddress (string) --The REPLY-TO email address.
:type SmsConfiguration: dict
:param SmsConfiguration: The SMS configuration.
SnsCallerArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller.
ExternalId (string) --The external ID.
:type UserPoolTags: dict
:param UserPoolTags: The cost allocation tags for the user pool. For more information, see Adding Cost Allocation Tags to Your User Pool
(string) --
(string) --
:type AdminCreateUserConfig: dict
:param AdminCreateUserConfig: The configuration for AdminCreateUser requests.
AllowAdminCreateUserOnly (boolean) --Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app.
UnusedAccountValidityDays (integer) --The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call AdminCreateUser again, specifying 'RESEND' for the MessageAction parameter. The default value for this parameter is 7.
InviteMessageTemplate (dict) --The message template to be used for the welcome message to new users.
SMSMessage (string) --The message template for SMS messages.
EmailMessage (string) --The message template for email messages.
EmailSubject (string) --The subject line for email messages.
:type Schema: list
:param Schema: An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.
(dict) --Contains information about the schema attribute.
Name (string) --A schema attribute of the name type.
AttributeDataType (string) --The attribute data type.
DeveloperOnlyAttribute (boolean) --Specifies whether the attribute type is developer only.
Mutable (boolean) --Specifies whether the attribute can be changed once it has been created.
Required (boolean) --Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
NumberAttributeConstraints (dict) --Specifies the constraints for an attribute of the number type.
MinValue (string) --The minimum value of an attribute that is of the number data type.
MaxValue (string) --The maximum value of an attribute that is of the number data type.
StringAttributeConstraints (dict) --Specifies the constraints for an attribute of the string type.
MinLength (string) --The minimum length of an attribute value of the string type.
MaxLength (string) --The maximum length of an attribute value of the string type.
:rtype: dict
:return: {
'UserPool': {
'Id': 'string',
'Name': 'string',
'Policies': {
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
'LambdaConfig': {
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
'Status': 'Enabled'|'Disabled',
'LastModifiedDate': datetime(2015, 1, 1),
'CreationDate': datetime(2015, 1, 1),
'SchemaAttributes': [
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
],
'AutoVerifiedAttributes': [
'phone_number'|'email',
],
'AliasAttributes': [
'phone_number'|'email'|'preferred_username',
],
'SmsVerificationMessage': 'string',
'EmailVerificationMessage': 'string',
'EmailVerificationSubject': 'string',
'SmsAuthenticationMessage': 'string',
'MfaConfiguration': 'OFF'|'ON'|'OPTIONAL',
'DeviceConfiguration': {
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
'EstimatedNumberOfUsers': 123,
'EmailConfiguration': {
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
'SmsConfiguration': {
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
'UserPoolTags': {
'string': 'string'
},
'SmsConfigurationFailure': 'string',
'EmailConfigurationFailure': 'string',
'AdminCreateUserConfig': {
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
}
}
}
:returns:
(string) --
### Response:
def create_user_pool(PoolName=None, Policies=None, LambdaConfig=None, AutoVerifiedAttributes=None, AliasAttributes=None, SmsVerificationMessage=None, EmailVerificationMessage=None, EmailVerificationSubject=None, SmsAuthenticationMessage=None, MfaConfiguration=None, DeviceConfiguration=None, EmailConfiguration=None, SmsConfiguration=None, UserPoolTags=None, AdminCreateUserConfig=None, Schema=None):
"""
Creates a new Amazon Cognito user pool and sets the password policy for the pool.
See also: AWS API Documentation
:example: response = client.create_user_pool(
PoolName='string',
Policies={
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
LambdaConfig={
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
AutoVerifiedAttributes=[
'phone_number'|'email',
],
AliasAttributes=[
'phone_number'|'email'|'preferred_username',
],
SmsVerificationMessage='string',
EmailVerificationMessage='string',
EmailVerificationSubject='string',
SmsAuthenticationMessage='string',
MfaConfiguration='OFF'|'ON'|'OPTIONAL',
DeviceConfiguration={
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
EmailConfiguration={
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
SmsConfiguration={
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
UserPoolTags={
'string': 'string'
},
AdminCreateUserConfig={
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
},
Schema=[
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
]
)
:type PoolName: string
:param PoolName: [REQUIRED]
A string used to name the user pool.
:type Policies: dict
:param Policies: The policies associated with the new user pool.
PasswordPolicy (dict) --A container for information about the user pool password policy.
MinimumLength (integer) --The minimum length of the password policy that you have set. Cannot be less than 6.
RequireUppercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.
RequireLowercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.
RequireNumbers (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one number in their password.
RequireSymbols (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.
:type LambdaConfig: dict
:param LambdaConfig: The Lambda trigger configuration information for the new user pool.
PreSignUp (string) --A pre-registration AWS Lambda trigger.
CustomMessage (string) --A custom Message AWS Lambda trigger.
PostConfirmation (string) --A post-confirmation AWS Lambda trigger.
PreAuthentication (string) --A pre-authentication AWS Lambda trigger.
PostAuthentication (string) --A post-authentication AWS Lambda trigger.
DefineAuthChallenge (string) --Defines the authentication challenge.
CreateAuthChallenge (string) --Creates an authentication challenge.
VerifyAuthChallengeResponse (string) --Verifies the authentication challenge response.
:type AutoVerifiedAttributes: list
:param AutoVerifiedAttributes: The attributes to be auto-verified. Possible values: email , phone_number .
(string) --
:type AliasAttributes: list
:param AliasAttributes: Attributes supported as an alias for this user pool. Possible values: phone_number , email , or preferred_username .
(string) --
:type SmsVerificationMessage: string
:param SmsVerificationMessage: A string representing the SMS verification message.
:type EmailVerificationMessage: string
:param EmailVerificationMessage: A string representing the email verification message.
:type EmailVerificationSubject: string
:param EmailVerificationSubject: A string representing the email verification subject.
:type SmsAuthenticationMessage: string
:param SmsAuthenticationMessage: A string representing the SMS authentication message.
:type MfaConfiguration: string
:param MfaConfiguration: Specifies MFA configuration details.
:type DeviceConfiguration: dict
:param DeviceConfiguration: The device configuration.
ChallengeRequiredOnNewDevice (boolean) --Indicates whether a challenge is required on a new device. Only applicable to a new device.
DeviceOnlyRememberedOnUserPrompt (boolean) --If true, a device is only remembered on user prompt.
:type EmailConfiguration: dict
:param EmailConfiguration: The email configuration.
SourceArn (string) --The Amazon Resource Name (ARN) of the email source.
ReplyToEmailAddress (string) --The REPLY-TO email address.
:type SmsConfiguration: dict
:param SmsConfiguration: The SMS configuration.
SnsCallerArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller.
ExternalId (string) --The external ID.
:type UserPoolTags: dict
:param UserPoolTags: The cost allocation tags for the user pool. For more information, see Adding Cost Allocation Tags to Your User Pool
(string) --
(string) --
:type AdminCreateUserConfig: dict
:param AdminCreateUserConfig: The configuration for AdminCreateUser requests.
AllowAdminCreateUserOnly (boolean) --Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app.
UnusedAccountValidityDays (integer) --The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call AdminCreateUser again, specifying 'RESEND' for the MessageAction parameter. The default value for this parameter is 7.
InviteMessageTemplate (dict) --The message template to be used for the welcome message to new users.
SMSMessage (string) --The message template for SMS messages.
EmailMessage (string) --The message template for email messages.
EmailSubject (string) --The subject line for email messages.
:type Schema: list
:param Schema: An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.
(dict) --Contains information about the schema attribute.
Name (string) --A schema attribute of the name type.
AttributeDataType (string) --The attribute data type.
DeveloperOnlyAttribute (boolean) --Specifies whether the attribute type is developer only.
Mutable (boolean) --Specifies whether the attribute can be changed once it has been created.
Required (boolean) --Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.
NumberAttributeConstraints (dict) --Specifies the constraints for an attribute of the number type.
MinValue (string) --The minimum value of an attribute that is of the number data type.
MaxValue (string) --The maximum value of an attribute that is of the number data type.
StringAttributeConstraints (dict) --Specifies the constraints for an attribute of the string type.
MinLength (string) --The minimum length of an attribute value of the string type.
MaxLength (string) --The maximum length of an attribute value of the string type.
:rtype: dict
:return: {
'UserPool': {
'Id': 'string',
'Name': 'string',
'Policies': {
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False
}
},
'LambdaConfig': {
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string'
},
'Status': 'Enabled'|'Disabled',
'LastModifiedDate': datetime(2015, 1, 1),
'CreationDate': datetime(2015, 1, 1),
'SchemaAttributes': [
{
'Name': 'string',
'AttributeDataType': 'String'|'Number'|'DateTime'|'Boolean',
'DeveloperOnlyAttribute': True|False,
'Mutable': True|False,
'Required': True|False,
'NumberAttributeConstraints': {
'MinValue': 'string',
'MaxValue': 'string'
},
'StringAttributeConstraints': {
'MinLength': 'string',
'MaxLength': 'string'
}
},
],
'AutoVerifiedAttributes': [
'phone_number'|'email',
],
'AliasAttributes': [
'phone_number'|'email'|'preferred_username',
],
'SmsVerificationMessage': 'string',
'EmailVerificationMessage': 'string',
'EmailVerificationSubject': 'string',
'SmsAuthenticationMessage': 'string',
'MfaConfiguration': 'OFF'|'ON'|'OPTIONAL',
'DeviceConfiguration': {
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
'EstimatedNumberOfUsers': 123,
'EmailConfiguration': {
'SourceArn': 'string',
'ReplyToEmailAddress': 'string'
},
'SmsConfiguration': {
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
'UserPoolTags': {
'string': 'string'
},
'SmsConfigurationFailure': 'string',
'EmailConfigurationFailure': 'string',
'AdminCreateUserConfig': {
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
}
}
}
:returns:
(string) --
"""
pass |
def victims(self, filters=None, params=None):
"""
Gets all victims from a tag.
"""
victim = self._tcex.ti.victim(None)
for v in self.tc_requests.victims_from_tag(
victim, self.name, filters=filters, params=params
):
yield v | Gets all victims from a tag. | Below is the the instruction that describes the task:
### Input:
Gets all victims from a tag.
### Response:
def victims(self, filters=None, params=None):
"""
Gets all victims from a tag.
"""
victim = self._tcex.ti.victim(None)
for v in self.tc_requests.victims_from_tag(
victim, self.name, filters=filters, params=params
):
yield v |
def authenticationAndCipheringResponse(
AuthenticationParameterSRES_presence=0,
MobileId_presence=0):
"""AUTHENTICATION AND CIPHERING RESPONSE Section 9.4.10"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x13) # 00010011
c = AcReferenceNumberAndSpareHalfOctets()
packet = a / b / c
if AuthenticationParameterSRES_presence is 1:
e = AuthenticationParameterSRES(ieiAPS=0x22)
packet = packet / e
if MobileId_presence is 1:
f = MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
packet = packet / f
return packet | AUTHENTICATION AND CIPHERING RESPONSE Section 9.4.10 | Below is the the instruction that describes the task:
### Input:
AUTHENTICATION AND CIPHERING RESPONSE Section 9.4.10
### Response:
def authenticationAndCipheringResponse(
AuthenticationParameterSRES_presence=0,
MobileId_presence=0):
"""AUTHENTICATION AND CIPHERING RESPONSE Section 9.4.10"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x13) # 00010011
c = AcReferenceNumberAndSpareHalfOctets()
packet = a / b / c
if AuthenticationParameterSRES_presence is 1:
e = AuthenticationParameterSRES(ieiAPS=0x22)
packet = packet / e
if MobileId_presence is 1:
f = MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
packet = packet / f
return packet |
def deploy_snmp(snmp, host=None, admin_username=None,
admin_password=None, module=None):
'''
Change the QuickDeploy SNMP community string, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_snmp SNMP_STRING
host=<remote DRAC or CMC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_password diana secret
'''
return __execute_cmd('deploy -v SNMPv2 {0} ro'.format(snmp),
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module) | Change the QuickDeploy SNMP community string, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_snmp SNMP_STRING
host=<remote DRAC or CMC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_password diana secret | Below is the the instruction that describes the task:
### Input:
Change the QuickDeploy SNMP community string, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_snmp SNMP_STRING
host=<remote DRAC or CMC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_password diana secret
### Response:
def deploy_snmp(snmp, host=None, admin_username=None,
admin_password=None, module=None):
'''
Change the QuickDeploy SNMP community string, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_snmp SNMP_STRING
host=<remote DRAC or CMC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_password diana secret
'''
return __execute_cmd('deploy -v SNMPv2 {0} ro'.format(snmp),
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module) |
def lstsq(cls, a, b):
"""Return the least-squares solution to a linear matrix equation.
:param Matrix a: Design matrix with the values of the independent variables.
:param Matrix b: Matrix with the "dependent variable" values.
b can only have one column.
:raise: Raises an :py:exc:`ValueError`, if
- the number of rows of a and b does not match.
- b has more than one column.
:note: The algorithm solves the following equations.
beta = a^+ b.
"""
# Check if the size of the input matrices matches
if a.get_height() != b.get_height():
raise ValueError("Size of input matrices does not match")
if b.get_width() != 1:
raise ValueError("Matrix with dependent variable has more than 1 column")
aPseudo = a.pseudoinverse()
# The following code could be used if c is regular.
# aTrans = a.transform()
# c = aTrans * a
# invers() raises an ValueError, if c is not invertible
# cInvers = c.invers()
# beta = cInvers * aTrans * b
beta = aPseudo * b
return beta | Return the least-squares solution to a linear matrix equation.
:param Matrix a: Design matrix with the values of the independent variables.
:param Matrix b: Matrix with the "dependent variable" values.
b can only have one column.
:raise: Raises an :py:exc:`ValueError`, if
- the number of rows of a and b does not match.
- b has more than one column.
:note: The algorithm solves the following equations.
beta = a^+ b. | Below is the the instruction that describes the task:
### Input:
Return the least-squares solution to a linear matrix equation.
:param Matrix a: Design matrix with the values of the independent variables.
:param Matrix b: Matrix with the "dependent variable" values.
b can only have one column.
:raise: Raises an :py:exc:`ValueError`, if
- the number of rows of a and b does not match.
- b has more than one column.
:note: The algorithm solves the following equations.
beta = a^+ b.
### Response:
def lstsq(cls, a, b):
"""Return the least-squares solution to a linear matrix equation.
:param Matrix a: Design matrix with the values of the independent variables.
:param Matrix b: Matrix with the "dependent variable" values.
b can only have one column.
:raise: Raises an :py:exc:`ValueError`, if
- the number of rows of a and b does not match.
- b has more than one column.
:note: The algorithm solves the following equations.
beta = a^+ b.
"""
# Check if the size of the input matrices matches
if a.get_height() != b.get_height():
raise ValueError("Size of input matrices does not match")
if b.get_width() != 1:
raise ValueError("Matrix with dependent variable has more than 1 column")
aPseudo = a.pseudoinverse()
# The following code could be used if c is regular.
# aTrans = a.transform()
# c = aTrans * a
# invers() raises an ValueError, if c is not invertible
# cInvers = c.invers()
# beta = cInvers * aTrans * b
beta = aPseudo * b
return beta |
def _generateTabularData(res, oldTabularData = {}, isTerminal=False, canUnicode=True):
"""
Method that recovers the values and columns from the current structure
This method is used by:
- usufyToCsvExport
- usufyToOdsExport
- usufyToXlsExport
- usufyToXlsxExport
Args:
-----
res: New data to export.
oldTabularData: The previous data stored.
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
]
]
}
isTerminal: If isTerminal is activated, only information related to
relevant utils will be shown.
canUnicode: Variable that stores if the printed output can deal with
Unicode characters.
Returns:
--------
The values, as a dictionary containing all the information stored.
Values is like:
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
],
[
"i3visio",
"Github",
"https://github.com/i3visio",
]
]
}
"""
def _grabbingNewHeader(h):
"""
Updates the headers to be general.
Changing the starting @ for a '_' and changing the "i3visio." for
"i3visio_". Changed in 0.9.4+.
Args:
-----
h: A header to be sanitised.
Returns:
--------
string: The modified header.
"""
if h[0] == "@":
h = h.replace("@", "_")
elif "i3visio." in h:
h = h.replace("i3visio.", "i3visio_")
return h
# Entities allowed for the output in terminal
allowedInTerminal = [
"i3visio_alias",
"i3visio_uri",
"i3visio_platform",
"i3visio_email",
"i3visio_ipv4",
"i3visio_phone",
"i3visio_dni",
"i3visio_domain",
"i3visio_platform_leaked",
#"_source"
]
# List of profiles found
values = {}
headers = ["_id"]
try:
if not isTerminal:
# Recovering the headers in the first line of the old Data
headers = oldTabularData["OSRFramework"][0]
else:
# Recovering only the printable headers if in Terminal mode
oldHeaders = oldTabularData["OSRFramework"][0]
headers = []
for h in oldHeaders:
h = _grabbingNewHeader(h)
if h in allowedInTerminal:
# Set to simplify the table shown in mailfy for leaked platforms
if h in ["i3visio_domain", "i3visio_alias"] and "_source" in old_headers:
pass
else:
headers.append(h)
# Changing the starting @ for a '_' and changing the "i3visio." for "i3visio_". Changed in 0.9.4+
for i, h in enumerate(headers):
h = _grabbingNewHeader(h)
# Replacing the header
headers[i] = h
except:
# No previous files... Easy...
headers = ["_id"]
# We are assuming that we received a list of profiles.
for p in res:
# Creating the dictionaries
values[p["value"]] = {}
attributes = p["attributes"]
# Processing all the attributes found
for a in attributes:
# Grabbing the type in the new format
h = _grabbingNewHeader(a["type"])
# Default behaviour for the output methods
if not isTerminal:
values[p["value"]][h] = a["value"]
# Appending the column if not already included
if str(h) not in headers:
headers.append(str(h))
# Specific table construction for the terminal output
else:
if h in allowedInTerminal:
values[p["value"]][h] = a["value"]
# Appending the column if not already included
if str(h) not in headers:
headers.append(str(h))
data = {}
# Note that each row should be a list!
workingSheet = []
# Appending the headers
workingSheet.append(headers)
# First, we will iterate through the previously stored values
try:
for dataRow in oldTabularData["OSRFramework"][1:]:
# Recovering the previous data
newRow = []
for cell in dataRow:
newRow.append(cell)
# Now, we will fill the rest of the cells with "N/A" values
for i in range(len(headers)-len(dataRow)):
# Printing a Not Applicable value
newRow.append("[N/A]")
# Appending the newRow to the data structure
workingSheet.append(newRow)
except Exception, e:
# No previous value found!
pass
# After having all the previous data stored an updated... We will go through the rest:
for prof in values.keys():
# Creating an empty structure
newRow = []
for i, col in enumerate(headers):
try:
if col == "_id":
newRow.append(len(workingSheet))
else:
if canUnicode:
newRow.append(unicode(values[prof][col]))
else:
newRow.append(str(values[prof][col]))
except UnicodeEncodeError as e:
# Printing that an error was found
newRow.append("[WARNING: Unicode Encode]")
except:
# Printing that this is not applicable value
newRow.append("[N/A]")
# Appending the newRow to the data structure
workingSheet.append(newRow)
# Storing the workingSheet onto the data structure to be stored
data.update({"OSRFramework": workingSheet})
return data | Method that recovers the values and columns from the current structure
This method is used by:
- usufyToCsvExport
- usufyToOdsExport
- usufyToXlsExport
- usufyToXlsxExport
Args:
-----
res: New data to export.
oldTabularData: The previous data stored.
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
]
]
}
isTerminal: If isTerminal is activated, only information related to
relevant utils will be shown.
canUnicode: Variable that stores if the printed output can deal with
Unicode characters.
Returns:
--------
The values, as a dictionary containing all the information stored.
Values is like:
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
],
[
"i3visio",
"Github",
"https://github.com/i3visio",
]
]
} | Below is the the instruction that describes the task:
### Input:
Method that recovers the values and columns from the current structure
This method is used by:
- usufyToCsvExport
- usufyToOdsExport
- usufyToXlsExport
- usufyToXlsxExport
Args:
-----
res: New data to export.
oldTabularData: The previous data stored.
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
]
]
}
isTerminal: If isTerminal is activated, only information related to
relevant utils will be shown.
canUnicode: Variable that stores if the printed output can deal with
Unicode characters.
Returns:
--------
The values, as a dictionary containing all the information stored.
Values is like:
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
],
[
"i3visio",
"Github",
"https://github.com/i3visio",
]
]
}
### Response:
def _generateTabularData(res, oldTabularData = {}, isTerminal=False, canUnicode=True):
"""
Method that recovers the values and columns from the current structure
This method is used by:
- usufyToCsvExport
- usufyToOdsExport
- usufyToXlsExport
- usufyToXlsxExport
Args:
-----
res: New data to export.
oldTabularData: The previous data stored.
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
]
]
}
isTerminal: If isTerminal is activated, only information related to
relevant utils will be shown.
canUnicode: Variable that stores if the printed output can deal with
Unicode characters.
Returns:
--------
The values, as a dictionary containing all the information stored.
Values is like:
{
"OSRFramework": [
[
"i3visio.alias",
"i3visio.platform",
"i3visio.uri"
],
[
"i3visio",
"Twitter",
"https://twitter.com/i3visio",
],
[
"i3visio",
"Github",
"https://github.com/i3visio",
]
]
}
"""
def _grabbingNewHeader(h):
"""
Updates the headers to be general.
Changing the starting @ for a '_' and changing the "i3visio." for
"i3visio_". Changed in 0.9.4+.
Args:
-----
h: A header to be sanitised.
Returns:
--------
string: The modified header.
"""
if h[0] == "@":
h = h.replace("@", "_")
elif "i3visio." in h:
h = h.replace("i3visio.", "i3visio_")
return h
# Entities allowed for the output in terminal
allowedInTerminal = [
"i3visio_alias",
"i3visio_uri",
"i3visio_platform",
"i3visio_email",
"i3visio_ipv4",
"i3visio_phone",
"i3visio_dni",
"i3visio_domain",
"i3visio_platform_leaked",
#"_source"
]
# List of profiles found
values = {}
headers = ["_id"]
try:
if not isTerminal:
# Recovering the headers in the first line of the old Data
headers = oldTabularData["OSRFramework"][0]
else:
# Recovering only the printable headers if in Terminal mode
oldHeaders = oldTabularData["OSRFramework"][0]
headers = []
for h in oldHeaders:
h = _grabbingNewHeader(h)
if h in allowedInTerminal:
# Set to simplify the table shown in mailfy for leaked platforms
if h in ["i3visio_domain", "i3visio_alias"] and "_source" in old_headers:
pass
else:
headers.append(h)
# Changing the starting @ for a '_' and changing the "i3visio." for "i3visio_". Changed in 0.9.4+
for i, h in enumerate(headers):
h = _grabbingNewHeader(h)
# Replacing the header
headers[i] = h
except:
# No previous files... Easy...
headers = ["_id"]
# We are assuming that we received a list of profiles.
for p in res:
# Creating the dictionaries
values[p["value"]] = {}
attributes = p["attributes"]
# Processing all the attributes found
for a in attributes:
# Grabbing the type in the new format
h = _grabbingNewHeader(a["type"])
# Default behaviour for the output methods
if not isTerminal:
values[p["value"]][h] = a["value"]
# Appending the column if not already included
if str(h) not in headers:
headers.append(str(h))
# Specific table construction for the terminal output
else:
if h in allowedInTerminal:
values[p["value"]][h] = a["value"]
# Appending the column if not already included
if str(h) not in headers:
headers.append(str(h))
data = {}
# Note that each row should be a list!
workingSheet = []
# Appending the headers
workingSheet.append(headers)
# First, we will iterate through the previously stored values
try:
for dataRow in oldTabularData["OSRFramework"][1:]:
# Recovering the previous data
newRow = []
for cell in dataRow:
newRow.append(cell)
# Now, we will fill the rest of the cells with "N/A" values
for i in range(len(headers)-len(dataRow)):
# Printing a Not Applicable value
newRow.append("[N/A]")
# Appending the newRow to the data structure
workingSheet.append(newRow)
except Exception, e:
# No previous value found!
pass
# After having all the previous data stored an updated... We will go through the rest:
for prof in values.keys():
# Creating an empty structure
newRow = []
for i, col in enumerate(headers):
try:
if col == "_id":
newRow.append(len(workingSheet))
else:
if canUnicode:
newRow.append(unicode(values[prof][col]))
else:
newRow.append(str(values[prof][col]))
except UnicodeEncodeError as e:
# Printing that an error was found
newRow.append("[WARNING: Unicode Encode]")
except:
# Printing that this is not applicable value
newRow.append("[N/A]")
# Appending the newRow to the data structure
workingSheet.append(newRow)
# Storing the workingSheet onto the data structure to be stored
data.update({"OSRFramework": workingSheet})
return data |
def find_components_without_annotation(model, components):
"""
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
"""
return [elem for elem in getattr(model, components) if
elem.annotation is None or len(elem.annotation) == 0] | Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation. | Below is the the instruction that describes the task:
### Input:
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
### Response:
def find_components_without_annotation(model, components):
"""
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
"""
return [elem for elem in getattr(model, components) if
elem.annotation is None or len(elem.annotation) == 0] |
def _parse_file(filename, relpath=None):
"""
Return a list of :class:`_PyconfigCall` from parsing `filename`.
:param filename: A file to parse
:param relpath: Relative directory to strip (optional)
:type filename: str
:type relpath: str
"""
with open(filename, 'r') as source:
source = source.read()
pyconfig_calls = []
try:
nodes = ast.parse(source, filename=filename)
except SyntaxError:
# XXX(Jake): We might want to handle this differently
return []
# Look for UTF-8 encoding
first_lines = source[0:200]
match = re.match('^#.*coding[:=].?([a-zA-Z0-9-_]+).*', first_lines)
if match:
try:
coding = match.group(1)
source = source.decode(coding)
except:
print("# Error decoding file, may not parse correctly:", filename)
try:
# Split the source into lines so we can reference it easily
source = source.split('\n')
except:
print("# Error parsing file, ignoring:", filename);
return []
# Make the filename relative to the given path, if needed
if relpath:
filename = os.path.relpath(filename, relpath)
for call in ast.walk(nodes):
if not isinstance(call, _ast.Call):
# Skip any node that isn't a Call
continue
func = call.func
if not isinstance(call.func, _ast.Attribute):
# We're looking for calls to pyconfig.*, so the function has to be
# an Attribute node, otherwise skip it
continue
if getattr(func.value, 'id', None) != 'pyconfig':
# If the Attribute value isn't a Name (doesn't have an `id`) or it
# isn't 'pyconfig', then we skip
continue
if func.attr not in ['get', 'set', 'setting']:
# If the Attribute attr isn't one of the pyconfig API methods, then
# we skip
continue
# Now we parse the call arguments as best we can
args = []
if call.args:
arg = call.args[0]
if isinstance(arg, _ast.Str):
args.append(arg.s)
else:
args.append(_map_arg(arg))
for arg in call.args[1:]:
args.append(_map_arg(arg))
line = (filename, source[call.lineno-1], call.lineno, call.col_offset)
call = _PyconfigCall(func.attr, args[0], args[1:], line)
pyconfig_calls.append(call)
return pyconfig_calls | Return a list of :class:`_PyconfigCall` from parsing `filename`.
:param filename: A file to parse
:param relpath: Relative directory to strip (optional)
:type filename: str
:type relpath: str | Below is the the instruction that describes the task:
### Input:
Return a list of :class:`_PyconfigCall` from parsing `filename`.
:param filename: A file to parse
:param relpath: Relative directory to strip (optional)
:type filename: str
:type relpath: str
### Response:
def _parse_file(filename, relpath=None):
"""
Return a list of :class:`_PyconfigCall` from parsing `filename`.
:param filename: A file to parse
:param relpath: Relative directory to strip (optional)
:type filename: str
:type relpath: str
"""
with open(filename, 'r') as source:
source = source.read()
pyconfig_calls = []
try:
nodes = ast.parse(source, filename=filename)
except SyntaxError:
# XXX(Jake): We might want to handle this differently
return []
# Look for UTF-8 encoding
first_lines = source[0:200]
match = re.match('^#.*coding[:=].?([a-zA-Z0-9-_]+).*', first_lines)
if match:
try:
coding = match.group(1)
source = source.decode(coding)
except:
print("# Error decoding file, may not parse correctly:", filename)
try:
# Split the source into lines so we can reference it easily
source = source.split('\n')
except:
print("# Error parsing file, ignoring:", filename);
return []
# Make the filename relative to the given path, if needed
if relpath:
filename = os.path.relpath(filename, relpath)
for call in ast.walk(nodes):
if not isinstance(call, _ast.Call):
# Skip any node that isn't a Call
continue
func = call.func
if not isinstance(call.func, _ast.Attribute):
# We're looking for calls to pyconfig.*, so the function has to be
# an Attribute node, otherwise skip it
continue
if getattr(func.value, 'id', None) != 'pyconfig':
# If the Attribute value isn't a Name (doesn't have an `id`) or it
# isn't 'pyconfig', then we skip
continue
if func.attr not in ['get', 'set', 'setting']:
# If the Attribute attr isn't one of the pyconfig API methods, then
# we skip
continue
# Now we parse the call arguments as best we can
args = []
if call.args:
arg = call.args[0]
if isinstance(arg, _ast.Str):
args.append(arg.s)
else:
args.append(_map_arg(arg))
for arg in call.args[1:]:
args.append(_map_arg(arg))
line = (filename, source[call.lineno-1], call.lineno, call.col_offset)
call = _PyconfigCall(func.attr, args[0], args[1:], line)
pyconfig_calls.append(call)
return pyconfig_calls |
def hash_vector(self, v, querying=False):
"""
Hashes the vector and returns the binary bucket key as string.
"""
if scipy.sparse.issparse(v):
# If vector is sparse, make sure we have the CSR representation
# of the projection matrix
if self.normals_csr == None:
self.normals_csr = scipy.sparse.csr_matrix(self.normals)
# Make sure that we are using CSR format for multiplication
if not scipy.sparse.isspmatrix_csr(v):
v = scipy.sparse.csr_matrix(v)
# Project vector onto all hyperplane normals
projection = self.normals_csr.dot(v)
else:
# Project vector onto all hyperplane normals
projection = numpy.dot(self.normals, v)
# Return binary key
return [''.join(['1' if x > 0.0 else '0' for x in projection])] | Hashes the vector and returns the binary bucket key as string. | Below is the the instruction that describes the task:
### Input:
Hashes the vector and returns the binary bucket key as string.
### Response:
def hash_vector(self, v, querying=False):
"""
Hashes the vector and returns the binary bucket key as string.
"""
if scipy.sparse.issparse(v):
# If vector is sparse, make sure we have the CSR representation
# of the projection matrix
if self.normals_csr == None:
self.normals_csr = scipy.sparse.csr_matrix(self.normals)
# Make sure that we are using CSR format for multiplication
if not scipy.sparse.isspmatrix_csr(v):
v = scipy.sparse.csr_matrix(v)
# Project vector onto all hyperplane normals
projection = self.normals_csr.dot(v)
else:
# Project vector onto all hyperplane normals
projection = numpy.dot(self.normals, v)
# Return binary key
return [''.join(['1' if x > 0.0 else '0' for x in projection])] |
def GetCampaigns(self, client_customer_id):
"""Returns a client account's Campaigns that haven't been removed.
Args:
client_customer_id: str Client Customer Id used to retrieve Campaigns.
Returns:
list List of Campaign data objects.
"""
self.client.SetClientCustomerId(client_customer_id)
# A somewhat hackish workaround for "The read operation timed out" error,
# which could be triggered on AppEngine's end if the request is too large
# and is taking too long.
max_tries = 3
today = time.strftime('%Y%m%d', time.localtime())
for i in xrange(1, max_tries + 1):
try:
selector = {
'fields': ['Id', 'Name', 'Status', 'BudgetId', 'Amount'],
'predicates': [
{
'field': 'Status',
'operator': 'NOT_EQUALS',
'values': ['REMOVED']
}
],
'dateRange': {
'min': today,
'max': today
}
}
campaigns = self.client.GetService('CampaignService').get(selector)
if int(campaigns['totalNumEntries']) > 0:
return campaigns['entries']
else:
return None
except Exception, e:
if i == max_tries:
raise GoogleAdsError(e)
continue | Returns a client account's Campaigns that haven't been removed.
Args:
client_customer_id: str Client Customer Id used to retrieve Campaigns.
Returns:
list List of Campaign data objects. | Below is the the instruction that describes the task:
### Input:
Returns a client account's Campaigns that haven't been removed.
Args:
client_customer_id: str Client Customer Id used to retrieve Campaigns.
Returns:
list List of Campaign data objects.
### Response:
def GetCampaigns(self, client_customer_id):
"""Returns a client account's Campaigns that haven't been removed.
Args:
client_customer_id: str Client Customer Id used to retrieve Campaigns.
Returns:
list List of Campaign data objects.
"""
self.client.SetClientCustomerId(client_customer_id)
# A somewhat hackish workaround for "The read operation timed out" error,
# which could be triggered on AppEngine's end if the request is too large
# and is taking too long.
max_tries = 3
today = time.strftime('%Y%m%d', time.localtime())
for i in xrange(1, max_tries + 1):
try:
selector = {
'fields': ['Id', 'Name', 'Status', 'BudgetId', 'Amount'],
'predicates': [
{
'field': 'Status',
'operator': 'NOT_EQUALS',
'values': ['REMOVED']
}
],
'dateRange': {
'min': today,
'max': today
}
}
campaigns = self.client.GetService('CampaignService').get(selector)
if int(campaigns['totalNumEntries']) > 0:
return campaigns['entries']
else:
return None
except Exception, e:
if i == max_tries:
raise GoogleAdsError(e)
continue |
def get_related(page):
"""
Returns list of related Entry instances for specified page.
:param page: the page instance.
:rtype: list.
"""
related = []
entry = Entry.get_for_model(page)
if entry:
related = entry.related
return related | Returns list of related Entry instances for specified page.
:param page: the page instance.
:rtype: list. | Below is the the instruction that describes the task:
### Input:
Returns list of related Entry instances for specified page.
:param page: the page instance.
:rtype: list.
### Response:
def get_related(page):
"""
Returns list of related Entry instances for specified page.
:param page: the page instance.
:rtype: list.
"""
related = []
entry = Entry.get_for_model(page)
if entry:
related = entry.related
return related |
def download_extract(url):
"""download and extract file."""
logger.info("Downloading %s", url)
request = urllib2.Request(url)
request.add_header('User-Agent',
'caelum/0.1 +https://github.com/nrcharles/caelum')
opener = urllib2.build_opener()
with tempfile.TemporaryFile(suffix='.zip', dir=env.WEATHER_DATA_PATH) \
as local_file:
logger.debug('Saving to temporary file %s', local_file.name)
local_file.write(opener.open(request).read())
compressed_file = zipfile.ZipFile(local_file, 'r')
logger.debug('Extracting %s', compressed_file)
compressed_file.extractall(env.WEATHER_DATA_PATH)
local_file.close() | download and extract file. | Below is the the instruction that describes the task:
### Input:
download and extract file.
### Response:
def download_extract(url):
"""download and extract file."""
logger.info("Downloading %s", url)
request = urllib2.Request(url)
request.add_header('User-Agent',
'caelum/0.1 +https://github.com/nrcharles/caelum')
opener = urllib2.build_opener()
with tempfile.TemporaryFile(suffix='.zip', dir=env.WEATHER_DATA_PATH) \
as local_file:
logger.debug('Saving to temporary file %s', local_file.name)
local_file.write(opener.open(request).read())
compressed_file = zipfile.ZipFile(local_file, 'r')
logger.debug('Extracting %s', compressed_file)
compressed_file.extractall(env.WEATHER_DATA_PATH)
local_file.close() |
def reset(self, history=None):
"""
Resets the Runner's internal stats counters.
If history is empty, use default values in history.get().
Args:
history (dict): A dictionary containing an already run experiment's results. Keys should be:
episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)
"""
if not history:
history = dict()
self.episode_rewards = history.get("episode_rewards", list())
self.episode_timesteps = history.get("episode_timesteps", list())
self.episode_times = history.get("episode_times", list()) | Resets the Runner's internal stats counters.
If history is empty, use default values in history.get().
Args:
history (dict): A dictionary containing an already run experiment's results. Keys should be:
episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times) | Below is the the instruction that describes the task:
### Input:
Resets the Runner's internal stats counters.
If history is empty, use default values in history.get().
Args:
history (dict): A dictionary containing an already run experiment's results. Keys should be:
episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)
### Response:
def reset(self, history=None):
"""
Resets the Runner's internal stats counters.
If history is empty, use default values in history.get().
Args:
history (dict): A dictionary containing an already run experiment's results. Keys should be:
episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)
"""
if not history:
history = dict()
self.episode_rewards = history.get("episode_rewards", list())
self.episode_timesteps = history.get("episode_timesteps", list())
self.episode_times = history.get("episode_times", list()) |
def zone_transfer(address, dns_name):
"""
Tries to perform a zone transfer.
"""
ips = []
try:
print_notification("Attempting dns zone transfer for {} on {}".format(dns_name, address))
z = dns.zone.from_xfr(dns.query.xfr(address, dns_name))
except dns.exception.FormError:
print_notification("Zone transfer not allowed")
return ips
names = z.nodes.keys()
print_success("Zone transfer successfull for {}, found {} entries".format(address, len(names)))
for n in names:
node = z[n]
data = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
if data:
# TODO add hostnames to entries.
# hostname = n.to_text()
for item in data.items:
address = item.address
ips.append(address)
return ips | Tries to perform a zone transfer. | Below is the the instruction that describes the task:
### Input:
Tries to perform a zone transfer.
### Response:
def zone_transfer(address, dns_name):
"""
Tries to perform a zone transfer.
"""
ips = []
try:
print_notification("Attempting dns zone transfer for {} on {}".format(dns_name, address))
z = dns.zone.from_xfr(dns.query.xfr(address, dns_name))
except dns.exception.FormError:
print_notification("Zone transfer not allowed")
return ips
names = z.nodes.keys()
print_success("Zone transfer successfull for {}, found {} entries".format(address, len(names)))
for n in names:
node = z[n]
data = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A)
if data:
# TODO add hostnames to entries.
# hostname = n.to_text()
for item in data.items:
address = item.address
ips.append(address)
return ips |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.