code
stringlengths 64
7.01k
| docstring
stringlengths 2
15.8k
| text
stringlengths 144
19.2k
|
|---|---|---|
#vtb
def json_schema(self, schema_id=None, is_main_schema=True):
Schema = self.__class__
s = self._schema
i = self._ignore_extra_keys
flavor = _priority(s)
if flavor != DICT and is_main_schema:
raise ValueError("The main schema must be a dict.")
if flavor == TYPE:
return {"type": {int: "integer", float: "number", bool: "boolean"}.get(s, "string")}
elif flavor == ITERABLE and len(s) == 1:
return {"type": "array", "items": Schema(s[0]).json_schema(is_main_schema=False)}
elif isinstance(s, Or):
values = [Schema(or_key).json_schema(is_main_schema=False) for or_key in s._args]
any_of = []
for value in values:
if value not in any_of:
any_of.append(value)
return {"anyOf": any_of}
if flavor != DICT:
return {}
if is_main_schema and not schema_id:
raise ValueError("schema_id is required.")
required_keys = []
expanded_schema = {}
for key in s:
if isinstance(key, Hook):
continue
if isinstance(s[key], Schema):
sub_schema = s[key]
else:
sub_schema = Schema(s[key], ignore_extra_keys=i)
sub_schema_json = sub_schema.json_schema(is_main_schema=False)
is_optional = False
if isinstance(key, Optional):
key = key._schema
is_optional = True
if isinstance(key, str):
if not is_optional:
required_keys.append(key)
expanded_schema[key] = sub_schema_json
elif isinstance(key, Or):
for or_key in key._args:
expanded_schema[or_key] = sub_schema_json
schema_dict = {
"type": "object",
"properties": expanded_schema,
"required": required_keys,
"additionalProperties": i,
}
if is_main_schema:
schema_dict.update({"id": schema_id, "$schema": "http://json-schema.org/draft-07/schema
return schema_dict
|
Generate a draft-07 JSON schema dict representing the Schema.
This method can only be called when the Schema's value is a dict.
This method must be called with a schema_id. Calling it without one
is used in a recursive context for sub schemas.
|
### Input:
Generate a draft-07 JSON schema dict representing the Schema.
This method can only be called when the Schema's value is a dict.
This method must be called with a schema_id. Calling it without one
is used in a recursive context for sub schemas.
### Response:
#vtb
def json_schema(self, schema_id=None, is_main_schema=True):
Schema = self.__class__
s = self._schema
i = self._ignore_extra_keys
flavor = _priority(s)
if flavor != DICT and is_main_schema:
raise ValueError("The main schema must be a dict.")
if flavor == TYPE:
return {"type": {int: "integer", float: "number", bool: "boolean"}.get(s, "string")}
elif flavor == ITERABLE and len(s) == 1:
return {"type": "array", "items": Schema(s[0]).json_schema(is_main_schema=False)}
elif isinstance(s, Or):
values = [Schema(or_key).json_schema(is_main_schema=False) for or_key in s._args]
any_of = []
for value in values:
if value not in any_of:
any_of.append(value)
return {"anyOf": any_of}
if flavor != DICT:
return {}
if is_main_schema and not schema_id:
raise ValueError("schema_id is required.")
required_keys = []
expanded_schema = {}
for key in s:
if isinstance(key, Hook):
continue
if isinstance(s[key], Schema):
sub_schema = s[key]
else:
sub_schema = Schema(s[key], ignore_extra_keys=i)
sub_schema_json = sub_schema.json_schema(is_main_schema=False)
is_optional = False
if isinstance(key, Optional):
key = key._schema
is_optional = True
if isinstance(key, str):
if not is_optional:
required_keys.append(key)
expanded_schema[key] = sub_schema_json
elif isinstance(key, Or):
for or_key in key._args:
expanded_schema[or_key] = sub_schema_json
schema_dict = {
"type": "object",
"properties": expanded_schema,
"required": required_keys,
"additionalProperties": i,
}
if is_main_schema:
schema_dict.update({"id": schema_id, "$schema": "http://json-schema.org/draft-07/schema
return schema_dict
|
#vtb
def getOr(subject, predicate, *args, **kwargs):
    """Retrieve an existing metadata node or generate a new one.

    :param subject: Subject to which the metadata node should be connected
    :param predicate: Predicate by which the metadata node should be connected
    :return: Metadata for the existing node, or a fresh Metadata built
        from ``*args``/``**kwargs`` when no such triple exists
    :rtype: Metadata
    """
    if (subject, predicate, None) in get_graph():
        existing = next(get_graph().objects(subject, predicate))
        return Metadata(node=existing)
    return Metadata(*args, **kwargs)
|
Retrieve a metadata node or generate a new one
:param subject: Subject to which the metadata node should be connected
:param predicate: Predicate by which the metadata node should be connected
:return: Metadata for given node
:rtype: Metadata
|
### Input:
Retrieve a metadata node or generate a new one
:param subject: Subject to which the metadata node should be connected
:param predicate: Predicate by which the metadata node should be connected
:return: Metadata for given node
:rtype: Metadata
### Response:
#vtb
def getOr(subject, predicate, *args, **kwargs):
if (subject, predicate, None) in get_graph():
return Metadata(node=get_graph().objects(subject, predicate).__next__())
return Metadata(*args, **kwargs)
|
#vtb
def pop(self, *args):
    """Remove and return the item at the given index (default last).

    Also removes the popped item's ``id`` from the internal ``self._dict``
    index map and, when an interior element is removed, shifts every
    stored index greater than the removed one down by one.

    :param args: optional single index, forwarded to ``list.pop``
    :return: the removed item
    """
    value = list.pop(self, *args)
    index = self._dict.pop(value.id)
    # Fast path: popping the final element leaves all other indices valid.
    # BUG FIX: the original compared ``args == [-1]`` -- *args is always a
    # tuple, so that branch could never match; compare against a tuple.
    if len(args) == 0 or args == (-1,):
        return value
    _dict = self._dict
    # Python-3 idiom: dict.items() instead of the py2 helper iteritems();
    # only values are reassigned, so mutating during iteration is safe.
    for key, pos in _dict.items():
        if pos > index:
            _dict[key] = pos - 1
    return value
|
remove and return item at index (default last).
|
### Input:
remove and return item at index (default last).
### Response:
#vtb
def pop(self, *args):
value = list.pop(self, *args)
index = self._dict.pop(value.id)
if len(args) == 0 or args == [-1]:
return value
_dict = self._dict
for i, j in iteritems(_dict):
if j > index:
_dict[i] = j - 1
return value
|
#vtb
def delete_disk(kwargs=None, conn=None, call=None):
if call != :
raise SaltCloudSystemExit(
)
if kwargs is None:
kwargs = {}
if not in kwargs:
raise SaltCloudSystemExit()
if not conn:
conn = get_conn()
try:
data = conn.delete_disk(kwargs[], kwargs.get(, False))
return {: }
except AzureMissingResourceHttpError as exc:
raise SaltCloudSystemExit(.format(kwargs[], exc.message))
|
.. versionadded:: 2015.8.0
Delete a specific disk associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_disk my-azure name=my_disk
salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
|
### Input:
.. versionadded:: 2015.8.0
Delete a specific disk associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_disk my-azure name=my_disk
salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
### Response:
#vtb
def delete_disk(kwargs=None, conn=None, call=None):
if call != :
raise SaltCloudSystemExit(
)
if kwargs is None:
kwargs = {}
if not in kwargs:
raise SaltCloudSystemExit()
if not conn:
conn = get_conn()
try:
data = conn.delete_disk(kwargs[], kwargs.get(, False))
return {: }
except AzureMissingResourceHttpError as exc:
raise SaltCloudSystemExit(.format(kwargs[], exc.message))
|
#vtb
def slices(self):
    """The bounding box as a tuple of `slice` objects.

    The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
    therefore can be used to slice numpy arrays directly.
    """
    y_slice = slice(self.iymin, self.iymax)
    x_slice = slice(self.ixmin, self.ixmax)
    return (y_slice, x_slice)
|
The bounding box as a tuple of `slice` objects.
The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
therefore can be used to slice numpy arrays.
|
### Input:
The bounding box as a tuple of `slice` objects.
The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
therefore can be used to slice numpy arrays.
### Response:
#vtb
def slices(self):
return (slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax))
|
#vtb
def get_installed_classes(cls):
    """Collect viable plugins registered under ``cls.entry_point``.

    Iterates over installed plugins associated with the `entry_point` and
    returns a dictionary of viable ones keyed off of their names.
    A viable installed plugin is one that is loadable, a subclass of the
    Pluggable subclass in question, and whose dependencies are met.
    """
    installed_classes = {}
    for entry_point in pkg_resources.iter_entry_points(cls.entry_point):
        try:
            plugin = entry_point.load()
        except ImportError as e:
            logger.error(
                "Could not load plugin %s: %s", entry_point.name, str(e)
            )
            continue
        if not issubclass(plugin, cls):
            # BUG FIX: ``plugin`` is itself a class, so
            # ``plugin.__class__.__name__`` logged the *metaclass* name
            # (e.g. "type"); log the plugin class's own name instead.
            logger.error(
                "Could not load plugin %s:" +
                " %s class is not subclass of %s",
                entry_point.name, plugin.__name__, cls.__name__
            )
            continue
        if not plugin.validate_dependencies():
            logger.error(
                "Could not load plugin %s:" +
                " %s class dependencies not met",
                entry_point.name, plugin.__name__
            )
            continue
        installed_classes[entry_point.name] = plugin
    return installed_classes
|
Iterates over installed plugins associated with the `entry_point` and
returns a dictionary of viable ones keyed off of their names.
A viable installed plugin is one that is both loadable *and* a subclass
of the Pluggable subclass in question.
|
### Input:
Iterates over installed plugins associated with the `entry_point` and
returns a dictionary of viable ones keyed off of their names.
A viable installed plugin is one that is both loadable *and* a subclass
of the Pluggable subclass in question.
### Response:
#vtb
def get_installed_classes(cls):
installed_classes = {}
for entry_point in pkg_resources.iter_entry_points(cls.entry_point):
try:
plugin = entry_point.load()
except ImportError as e:
logger.error(
"Could not load plugin %s: %s", entry_point.name, str(e)
)
continue
if not issubclass(plugin, cls):
logger.error(
"Could not load plugin %s:" +
" %s class is not subclass of %s",
entry_point.name, plugin.__class__.__name__, cls.__name__
)
continue
if not plugin.validate_dependencies():
logger.error(
"Could not load plugin %s:" +
" %s class dependencies not met",
entry_point.name, plugin.__name__
)
continue
installed_classes[entry_point.name] = plugin
return installed_classes
|
#vtb
def configKeyButtons(self, enableButtons=None, bounceTime=DEF_BOUNCE_TIME_NORMAL, pullUpDown=GPIO.PUD_UP, event=GPIO.BOTH):
    """Configure IO and event handling for multiple key buttons at once.

    @param enableButtons: an array of key button configs, e.g.
        [{ "id": BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
    @param bounceTime: debounce interval, default DEF_BOUNCE_TIME_NORMAL
    @param pullUpDown: pull resistor mode, default GPIO.PUD_UP
    @param event: GPIO.RISING | GPIO.FALLING | GPIO.BOTH (default)

    @see DEF_BOUNCE_TIME_SHORT_MON (10ms)
    @see DEF_BOUNCE_TIME_SHORT (50ms)
    @see DEF_BOUNCE_TIME_NORMAL (100ms)
    @see DEF_BOUNCE_TIME_LONG (200ms)
    """
    # Fix: None sentinel instead of a shared mutable [] default; the dead
    # trailing ``pass`` was also removed.
    for button in (enableButtons or []):
        self.setKeyButton(button["id"], button["callback"], bounceTime, pullUpDown, event)
|
!
\~english
Config multi key buttons IO and event on same time
@param enableButtons: an array of key button configs. eg. <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: Default set to GPIO.PUD_UP
@param event: Default set to GPIO.BOTH. it can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~chinese
同时配置多个按键IO和事件
@param enableButtons: 组按键配置 例如: <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: 默认 GPIO.PUD_UP
@param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~ \n
@see DEF_BOUNCE_TIME_SHORT_MON (10ms)
@see DEF_BOUNCE_TIME_SHORT (50ms)
@see DEF_BOUNCE_TIME_NORMAL (100ms)
@see DEF_BOUNCE_TIME_LONG (200ms)
|
### Input:
!
\~english
Config multi key buttons IO and event on same time
@param enableButtons: an array of key button configs. eg. <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: Default set to GPIO.PUD_UP
@param event: Default set to GPIO.BOTH. it can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~chinese
同时配置多个按键IO和事件
@param enableButtons: 组按键配置 例如: <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: 默认 GPIO.PUD_UP
@param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~ \n
@see DEF_BOUNCE_TIME_SHORT_MON (10ms)
@see DEF_BOUNCE_TIME_SHORT (50ms)
@see DEF_BOUNCE_TIME_NORMAL (100ms)
@see DEF_BOUNCE_TIME_LONG (200ms)
### Response:
#vtb
def configKeyButtons( self, enableButtons = [], bounceTime = DEF_BOUNCE_TIME_NORMAL, pullUpDown = GPIO.PUD_UP, event = GPIO.BOTH ):
for key in enableButtons:
self.setKeyButton( key["id"], key["callback"], bounceTime, pullUpDown, event )
pass
|
#vtb
def remove_all_listeners(self, event=None):
    """Remove all functions for all events, or one event if one is specified.

    :param event: Optional event you wish to remove all functions from
    """
    if event is None:
        self._registered_events = defaultdict(OrderedDict)
    else:
        self._registered_events[event] = OrderedDict()
|
Remove all functions for all events, or one event if one is specified.
:param event: Optional event you wish to remove all functions from
|
### Input:
Remove all functions for all events, or one event if one is specified.
:param event: Optional event you wish to remove all functions from
### Response:
#vtb
def remove_all_listeners(self, event=None):
if event is not None:
self._registered_events[event] = OrderedDict()
else:
self._registered_events = defaultdict(OrderedDict)
|
#vtb
def proc_polyline(self, tokens):
    """Return the Polyline component described by parsed *tokens*."""
    points = [(point["x"], point["y"]) for point in tokens["points"]]
    return Polyline(pen=self.pen, points=points)
|
Returns the components of a polyline.
|
### Input:
Returns the components of a polyline.
### Response:
#vtb
def proc_polyline(self, tokens):
pts = [(p["x"], p["y"]) for p in tokens["points"]]
component = Polyline(pen=self.pen, points=pts)
return component
|
#vtb
def set_wallpaper(image):
desktop_env = system.get_name()
if desktop_env in [, , , , ]:
uri = % image
SCHEMA =
KEY =
if desktop_env == :
uri = image
SCHEMA =
KEY =
try:
from gi.repository import Gio
gsettings = Gio.Settings.new(SCHEMA)
gsettings.set_string(KEY, uri)
except ImportError:
try:
gsettings_proc = sp.Popen(
[, , SCHEMA, KEY, uri])
except:
sp.Popen([,
,
,
,
,
% image],
stdout=sp.PIPE)
finally:
gsettings_proc.communicate()
if gsettings_proc.returncode != 0:
sp.Popen([,
,
,
,
,
% image])
elif desktop_env == :
sp.Popen(
[,
,
,
,
,
image]
)
elif desktop_env == :
kde_script = dedent(
).format(image)
sp.Popen(
[,
,
,
,
,
,
.format(kde_script)]
)
elif desktop_env in [, ]:
args = % image
sp.Popen(args, shell=True)
elif desktop_env == :
reg add "HKEY_CURRENT_USER\Control Panel\Desktop" \
/v Wallpaper /t REG_SZ /d %s /f
rundll32.exe user32.dll,UpdatePerUserSystemParameters
wallscript.batwmacFindertell application "System Events"
set desktopCount to count of desktops
repeat with desktopNumber from 1 to desktopCount
tell desktop desktopNumber
set picture to POSIX file "%s"
end tell
end repeat
end tellosascriptfeh--bg-scale', image])
except:
pass
|
Set the desktop wallpaper.
Sets the desktop wallpaper to an image.
Args:
image (str): The path to the image to be set as wallpaper.
|
### Input:
Set the desktop wallpaper.
Sets the desktop wallpaper to an image.
Args:
image (str): The path to the image to be set as wallpaper.
### Response:
#vtb
def set_wallpaper(image):
desktop_env = system.get_name()
if desktop_env in [, , , , ]:
uri = % image
SCHEMA =
KEY =
if desktop_env == :
uri = image
SCHEMA =
KEY =
try:
from gi.repository import Gio
gsettings = Gio.Settings.new(SCHEMA)
gsettings.set_string(KEY, uri)
except ImportError:
try:
gsettings_proc = sp.Popen(
[, , SCHEMA, KEY, uri])
except:
sp.Popen([,
,
,
,
,
% image],
stdout=sp.PIPE)
finally:
gsettings_proc.communicate()
if gsettings_proc.returncode != 0:
sp.Popen([,
,
,
,
,
% image])
elif desktop_env == :
sp.Popen(
[,
,
,
,
,
image]
)
elif desktop_env == :
kde_script = dedent(
).format(image)
sp.Popen(
[,
,
,
,
,
,
.format(kde_script)]
)
elif desktop_env in [, ]:
args = % image
sp.Popen(args, shell=True)
elif desktop_env == :
reg add "HKEY_CURRENT_USER\Control Panel\Desktop" \
/v Wallpaper /t REG_SZ /d %s /f
rundll32.exe user32.dll,UpdatePerUserSystemParameters
wallscript.batwmacFindertell application "System Events"
set desktopCount to count of desktops
repeat with desktopNumber from 1 to desktopCount
tell desktop desktopNumber
set picture to POSIX file "%s"
end tell
end repeat
end tellosascriptfeh--bg-scale', image])
except:
pass
|
#vtb
def list_vnets(access_token, subscription_id):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
,
, NETWORK_API])
return do_get(endpoint, access_token)
|
List the VNETs in a subscription .
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties.
|
### Input:
List the VNETs in a subscription .
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties.
### Response:
#vtb
def list_vnets(access_token, subscription_id):
endpoint = .join([get_rm_endpoint(),
, subscription_id,
,
, NETWORK_API])
return do_get(endpoint, access_token)
|
#vtb
def get_resource_area_by_host(self, area_id, host_id):
route_values = {}
if area_id is not None:
route_values[] = self._serialize.url(, area_id, )
query_parameters = {}
if host_id is not None:
query_parameters[] = self._serialize.query(, host_id, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response)
|
GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
|
### Input:
GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
### Response:
#vtb
def get_resource_area_by_host(self, area_id, host_id):
route_values = {}
if area_id is not None:
route_values[] = self._serialize.url(, area_id, )
query_parameters = {}
if host_id is not None:
query_parameters[] = self._serialize.query(, host_id, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response)
|
#vtb
def do_quit(self, _: argparse.Namespace) -> bool:
    """Exit this application by telling the command loop to stop."""
    stop_signal = self._STOP_AND_EXIT
    self._should_quit = True
    return stop_signal
|
Exit this application
|
### Input:
Exit this application
### Response:
#vtb
def do_quit(self, _: argparse.Namespace) -> bool:
self._should_quit = True
return self._STOP_AND_EXIT
|
#vtb
def cached(cls, timeout=60, cache_none=False):
    """Cache queries on this model.

    :param timeout: cache timeout in seconds
    :param cache_none: whether to cache a ``None`` result too
    Usage::
        >>> Model.cached(60).query({...})
    """
    wrapper = CachedModel(cls=cls, timeout=timeout, cache_none=cache_none)
    return wrapper
|
Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
|
### Input:
Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
### Response:
#vtb
def cached(cls, timeout=60, cache_none=False):
return CachedModel(cls=cls, timeout=timeout, cache_none=cache_none)
|
#vtb
def setCamera(self, camera_name, bit_depth=16):
self.coeffs[] = camera_name
self.coeffs[] = bit_depth
|
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
|
### Input:
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
### Response:
#vtb
def setCamera(self, camera_name, bit_depth=16):
self.coeffs[] = camera_name
self.coeffs[] = bit_depth
|
#vtb
def _cache_lookup(word, data_dir, native=False):
    """Check if *word* has a cached translation on disk.

    Parameters
    ----------
    word : str
        Word to check in cache.
    data_dir : pathlib.Path
        Cache directory location.
    native : bool
        Whether to use the native-language cache sub-directory.

    Returns
    -------
    translation : str or None
        Translation of the given word, or None on a cache miss.
    """
    subdir = "translations_native" if native else "translations"
    logger.debug("Cache lookup: %s", word)
    cache_file = data_dir.joinpath(subdir, "{}.html".format(word))
    if not cache_file.is_file():
        logger.debug("Cache miss: %s", word)
        return None
    with open(cache_file, mode="r") as f:
        logger.debug("Cache found: %s", word)
        return _parse_cached(f.read())
|
Checks if word is in cache.
Parameters
----------
word : str
Word to check in cache.
data_dir : pathlib.Path
Cache directory location.
Returns
-------
translation : str or None
Translation of given word.
|
### Input:
Checks if word is in cache.
Parameters
----------
word : str
Word to check in cache.
data_dir : pathlib.Path
Cache directory location.
Returns
-------
translation : str or None
Translation of given word.
### Response:
#vtb
def _cache_lookup(word, data_dir, native=False):
trans_dir = "translations"
if native:
trans_dir += "_native"
logger.debug("Cache lookup: %s", word)
filename = data_dir.joinpath(trans_dir, "{}.html".format(word))
if filename.is_file():
with open(filename, mode="r") as f:
logger.debug("Cache found: %s", word)
translation = _parse_cached(f.read())
return translation
logger.debug("Cache miss: %s", word)
return None
|
#vtb
def less_than_obs_constraints(self):
    """Get the names of the observations that are listed as less-than
    inequality constraints.  Zero-weighted obs are skipped.

    Returns
    -------
    pandas.Series : obsnme of observations that are non-zero weighted
        less than constraints
    """
    obs = self.observation_data
    mask = obs.apply(
        lambda row: self._is_less_const(row.obgnme) and row.weight != 0.0,
        axis=1,
    )
    return obs.loc[mask, "obsnme"]
|
get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
less than constraints
|
### Input:
get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of obseravtions that are non-zero weighted
less than constraints
### Response:
#vtb
def less_than_obs_constraints(self):
obs = self.observation_data
lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme) \
and x.weight != 0.0,axis=1),"obsnme"]
return lt_obs
|
#vtb
def get_review_id(self, id_):
    """Get a particular review id, independent from the user_id and
    startup_id.
    """
    url = _REVIEW_ID.format(
        c_api=_C_API_BEGINNING,
        api=_API_VERSION,
        id_=id_,
        at=self.access_token,
    )
    return _get_request(url)
|
Get a particular review id, independent from the user_id and
startup_id
|
### Input:
Get a particular review id, independent from the user_id and
startup_id
### Response:
#vtb
def get_review_id(self, id_):
return _get_request(_REVIEW_ID.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
id_=id_,
at=self.access_token))
|
#vtb
def find(self, obj, filter_to_class=Ingredient, constructor=None):
if callable(constructor):
obj = constructor(obj, shelf=self)
if isinstance(obj, basestring):
set_descending = obj.startswith()
if set_descending:
obj = obj[1:]
if obj not in self:
raise BadRecipe("{} doesn{} is not a {}desc{} is not a {}'.format(obj, filter_to_class))
|
Find an Ingredient, optionally using the shelf.
:param obj: A string or Ingredient
:param filter_to_class: The Ingredient subclass that obj must be an
instance of
:param constructor: An optional callable for building Ingredients
from obj
:return: An Ingredient of subclass `filter_to_class`
|
### Input:
Find an Ingredient, optionally using the shelf.
:param obj: A string or Ingredient
:param filter_to_class: The Ingredient subclass that obj must be an
instance of
:param constructor: An optional callable for building Ingredients
from obj
:return: An Ingredient of subclass `filter_to_class`
### Response:
#vtb
def find(self, obj, filter_to_class=Ingredient, constructor=None):
if callable(constructor):
obj = constructor(obj, shelf=self)
if isinstance(obj, basestring):
set_descending = obj.startswith()
if set_descending:
obj = obj[1:]
if obj not in self:
raise BadRecipe("{} doesn{} is not a {}desc{} is not a {}'.format(obj, filter_to_class))
|
#vtb
def subdevicenames(self) -> Tuple[str, ...]:
    """A |tuple| containing the device names."""
    # Iterating a dict yields its keys, so tuple(d) == tuple(d.keys()).
    return tuple(self.sequences)
|
A |tuple| containing the device names.
|
### Input:
A |tuple| containing the device names.
### Response:
#vtb
def subdevicenames(self) -> Tuple[str, ...]:
self: NetCDFVariableBase
return tuple(self.sequences.keys())
|
#vtb
def query_source(self, source):
    """Query layers by their source URL."""
    layers = self._get_repo_filter(Layer.objects)
    return layers.filter(url=source)
|
Query by source
|
### Input:
Query by source
### Response:
#vtb
def query_source(self, source):
return self._get_repo_filter(Layer.objects).filter(url=source)
|
#vtb
def migrate(move_data=True, update_alias=True):
next_index = PATTERN.replace(, datetime.now().strftime())
es = connections.get_connection()
es.indices.create(index=next_index)
if move_data:
es.reindex(
body={"source": {"index": ALIAS}, "dest": {"index": next_index}},
request_timeout=3600
)
es.indices.refresh(index=next_index)
if update_alias:
es.indices.update_aliases(body={
: [
{"remove": {"alias": ALIAS, "index": PATTERN}},
{"add": {"alias": ALIAS, "index": next_index}},
]
})
|
Upgrade function that creates a new index for the data. Optionally it also can
(and by default will) reindex previous copy of the data into the new index
(specify ``move_data=False`` to skip this step) and update the alias to
point to the latest index (set ``update_alias=False`` to skip).
Note that while this function is running the application can still perform
any and all searches without any loss of functionality. It should, however,
not perform any writes at this time as those might be lost.
|
### Input:
Upgrade function that creates a new index for the data. Optionally it also can
(and by default will) reindex previous copy of the data into the new index
(specify ``move_data=False`` to skip this step) and update the alias to
point to the latest index (set ``update_alias=False`` to skip).
Note that while this function is running the application can still perform
any and all searches without any loss of functionality. It should, however,
not perform any writes at this time as those might be lost.
### Response:
#vtb
def migrate(move_data=True, update_alias=True):
next_index = PATTERN.replace(, datetime.now().strftime())
es = connections.get_connection()
es.indices.create(index=next_index)
if move_data:
es.reindex(
body={"source": {"index": ALIAS}, "dest": {"index": next_index}},
request_timeout=3600
)
es.indices.refresh(index=next_index)
if update_alias:
es.indices.update_aliases(body={
: [
{"remove": {"alias": ALIAS, "index": PATTERN}},
{"add": {"alias": ALIAS, "index": next_index}},
]
})
|
#vtb
def platform_information(_linux_distribution=None):
linux_distribution = _linux_distribution or platform.linux_distribution
distro, release, codename = linux_distribution()
if not distro:
distro, release, codename = parse_os_release()
if not codename and in distro.lower():
debian_codenames = {
: ,
: ,
: ,
: ,
: ,
}
major_version = release.split()[0]
codename = debian_codenames.get(major_version, )
if not codename and in release:
major, minor = release.split()
if minor == :
codename = minor
else:
codename = major
if not codename and in distro.lower():
codename =
if not codename and in distro.lower():
codename =
if not codename and in distro.lower():
codename =
return (
str(distro).rstrip(),
str(release).rstrip(),
str(codename).rstrip()
)
|
detect platform information from remote host
|
### Input:
detect platform information from remote host
### Response:
#vtb
def platform_information(_linux_distribution=None):
linux_distribution = _linux_distribution or platform.linux_distribution
distro, release, codename = linux_distribution()
if not distro:
distro, release, codename = parse_os_release()
if not codename and in distro.lower():
debian_codenames = {
: ,
: ,
: ,
: ,
: ,
}
major_version = release.split()[0]
codename = debian_codenames.get(major_version, )
if not codename and in release:
major, minor = release.split()
if minor == :
codename = minor
else:
codename = major
if not codename and in distro.lower():
codename =
if not codename and in distro.lower():
codename =
if not codename and in distro.lower():
codename =
return (
str(distro).rstrip(),
str(release).rstrip(),
str(codename).rstrip()
)
|
#vtb
def update_remote_archive(self, save_uri, timeout=-1):
    """Saves a backup of the appliance to a previously-configured remote location.

    Args:
        save_uri (dict): The URI for saving the backup to a previously
            configured location.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, just
            stops waiting for its completion.

    Returns:
        dict: Backup details.
    """
    client = self._client
    return client.update_with_zero_body(uri=save_uri, timeout=timeout)
|
Saves a backup of the appliance to a previously-configured remote location.
Args:
save_uri (dict): The URI for saving the backup to a previously configured location.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Backup details.
|
### Input:
Saves a backup of the appliance to a previously-configured remote location.
Args:
save_uri (dict): The URI for saving the backup to a previously configured location.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Backup details.
### Response:
#vtb
def update_remote_archive(self, save_uri, timeout=-1):
return self._client.update_with_zero_body(uri=save_uri, timeout=timeout)
|
#vtb
def write_frames(filename, frames, compression=257, compression_level=6):
from LDAStools import frameCPP
stream = open_gwf(filename, )
if isinstance(frames, frameCPP.FrameH):
frames = [frames]
for frame in frames:
stream.WriteFrame(frame, compression, compression_level)
|
Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme
|
### Input:
Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme
### Response:
#vtb
def write_frames(filename, frames, compression=257, compression_level=6):
from LDAStools import frameCPP
stream = open_gwf(filename, )
if isinstance(frames, frameCPP.FrameH):
frames = [frames]
for frame in frames:
stream.WriteFrame(frame, compression, compression_level)
|
#vtb
def spell(self, word: str) -> List[str]:
    """Return a list of possible words, according to edit distance of 1
    and 2, sorted by frequency of word occurrence in the spelling
    dictionary (most frequent first).

    :param str word: A word to check its spelling
    """
    if not word:
        # BUG FIX: the annotated return type is List[str], but the
        # original returned "" here; an empty list is the correct
        # empty result.
        return []
    # list(...) defends .sort() in case self.known returns a set/iterable.
    candidates = list(
        self.known([word])
        or self.known(_edits1(word))
        or self.known(_edits2(word))
        or [word]
    )
    candidates.sort(key=self.freq, reverse=True)
    return candidates
|
Return a list of possible words, according to edit distance of 1 and 2,
sorted by frequency of word occurrence in the spelling dictionary
:param str word: A word to check its spelling
|
### Input:
Return a list of possible words, according to edit distance of 1 and 2,
sorted by frequency of word occurrence in the spelling dictionary
:param str word: A word to check its spelling
### Response:
#vtb
def spell(self, word: str) -> List[str]:
if not word:
return ""
candidates = (
self.known([word])
or self.known(_edits1(word))
or self.known(_edits2(word))
or [word]
)
candidates.sort(key=self.freq, reverse=True)
return candidates
|
#vtb
def getPlainText(self, identify=None):
frags = getattr(self, , None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, ):
plains.append(frag.text)
return .join(plains)
elif identify:
text = getattr(self, , None)
if text is None: text = repr(self)
return text
else:
return
|
Convenience function for templates which want access
to the raw text, without XML tags.
|
### Input:
Convenience function for templates which want access
to the raw text, without XML tags.
### Response:
#vtb
def getPlainText(self, identify=None):
frags = getattr(self, , None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, ):
plains.append(frag.text)
return .join(plains)
elif identify:
text = getattr(self, , None)
if text is None: text = repr(self)
return text
else:
return
|
#vtb
def get_subtree(self, name):
    r"""
    Get all node names in a sub-tree.

    :param name: Sub-tree root node name
    :type name: :ref:`NodeName`
    :rtype: list of :ref:`NodeName`
    :raises:
     * RuntimeError (Argument \`name\` is not valid)
     * RuntimeError (Node *[name]* not in tree)
    """
    # BUG FIX: the body previously started with a bare ``r`` statement
    # (the stripped remnant of this raw-string docstring), which raised
    # NameError on every call; the docstring is restored above.
    # NOTE(review): _validate_node_name appears to return truthy when the
    # name is NOT valid -- confirm against its definition (not visible here).
    if self._validate_node_name(name):
        raise RuntimeError("Argument `name` is not valid")
    self._node_in_tree(name)
    return self._get_subtree(name)
|
r"""
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2']
|
### Input:
r"""
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2']
### Response:
#vtb
def get_subtree(self, name):
r
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._get_subtree(name)
|
#vtb
def filename(self):
    """Return a filesystem path for the data, writing a temp file if needed.

    When no backing filename exists, the in-memory bytes are persisted to a
    temporary file created with ``delete=False`` so it survives close and
    the caller can read it afterwards.
    """
    if self._filename:
        return self._filename
    tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
        tmp.write(self._bytes)
    finally:
        tmp.close()
    return tmp.name
|
Returns the provided data as a file location.
|
### Input:
Returns the provided data as a file location.
### Response:
#vtb
def filename(self):
    """Return the provided data as a file location.

    Writes ``self._bytes`` to a non-deleting temp file when no filename
    was supplied.
    """
    if self._filename:
        return self._filename
    else:
        with tempfile.NamedTemporaryFile(delete=False) as f:
            f.write(self._bytes)
        return f.name
|
#vtb
def apply_to_segmentlist(self, seglist):
    """Widen every segment in ``seglist`` in place by this window's bounds."""
    # Rebuild each segment through its own class so subclass types survive;
    # the start moves down by low_window and the end up by high_window.
    for index, segment in enumerate(seglist):
        widened_start = segment[0] - self.low_window
        widened_end = segment[1] + self.high_window
        seglist[index] = type(segment)(widened_start, widened_end)
|
Apply our low and high windows to the segments in a
segmentlist.
|
### Input:
Apply our low and high windows to the segments in a
segmentlist.
### Response:
#vtb
def apply_to_segmentlist(self, seglist):
    """Apply our low and high windows to the segments in a segmentlist."""
    # In-place: each segment is replaced by a widened copy of the same class.
    for i, seg in enumerate(seglist):
        seglist[i] = seg.__class__(seg[0] - self.low_window, seg[1] + self.high_window)
|
#vtb
def element_focus_should_be_set(self, locator):
    """Verify that the element identified by ``locator`` has focus.

    ``locator`` is a Selenium 2 element locator, e.g. ``id=my_id``.
    """
    # BUG FIX: the original did `"Verifying..." % locator` on a format string
    # with no placeholder, which raises TypeError at runtime.
    self._info("Verifying element focus is set for %s" % locator)
    self._check_element_focus(True, locator)
|
Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
|
### Input:
Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
### Response:
#vtb
def element_focus_should_be_set(self, locator):
    """Verify that the element identified by ``locator`` has focus.

    ``locator`` is a Selenium 2 element locator, e.g. ``id=my_id``.
    """
    # BUG FIX: `"..." % locator` with no %s placeholder raises TypeError.
    self._info("Verifying element focus is set for %s" % locator)
    self._check_element_focus(True, locator)
|
#vtb
def _generate_version(base_version):
    """Return ``base_version``, extended with git revision info when available.

    Outside a git checkout (or without git installed) the plain base version
    is returned; a clean release also yields the bare version.  Otherwise the
    short commit SHA and a ".mod" dirty marker are appended, e.g.
    "1.2.3+abc123.mod".
    """
    pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    usable_repo = _is_git_repo(pkg_dir) and _have_git()
    if not usable_repo:
        return base_version
    clean_release = _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir)
    if clean_release:
        return base_version
    short_sha = _git_revision(pkg_dir).decode("utf-8")[:6]
    dirty_marker = ".mod" if _is_dirty(pkg_dir) else ""
    return "{0}+{1}{2}".format(base_version, short_sha, dirty_marker)
|
Generate a version with information about the git repository
|
### Input:
Generate a version with information about the git repository
### Response:
#vtb
def _generate_version(base_version):
    """Generate a version string with information about the git repository."""
    pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    # No git checkout / no git binary: fall back to the declared version.
    if not _is_git_repo(pkg_dir) or not _have_git():
        return base_version
    # A clean release tag needs no local-version suffix.
    if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
        return base_version
    return "{base_version}+{short_sha}{dirty}".format(
        base_version=base_version,
        short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
        dirty=".mod" if _is_dirty(pkg_dir) else "",
    )
|
#vtb
def merge(
        self, reservation_order_id, sources=None, custom_headers=None, raw=False, polling=True, **operation_config):
    # Long-running merge of two Reservations into one (Azure SDK pattern):
    # kick off the initial request, then return an LROPoller over it.
    # NOTE(review): several string literals in this body have been stripped
    # (empty call/subscript arguments below); as written this does not
    # parse.  Left byte-identical pending restoration from history.
    raw_result = self._merge_initial(
        reservation_order_id=reservation_order_id,
        sources=sources,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    def get_long_running_output(response):
        # NOTE(review): missing deserializer key (likely
        # '[ReservationResponse]').
        deserialized = self._deserialize(, response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    # NOTE(review): missing config key (likely 'long_running_operation_timeout').
    lro_delay = operation_config.get(
        ,
        self.config.long_running_operation_timeout)
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
Merges two `Reservation`s.
Merge the specified `Reservation`s into a new `Reservation`. The two
`Reservation`s being merged must have same properties.
:param reservation_order_id: Order Id of the reservation
:type reservation_order_id: str
:param sources: Format of the resource id should be
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type sources: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]]
:raises:
:class:`ErrorException<azure.mgmt.reservations.models.ErrorException>`
|
### Input:
Merges two `Reservation`s.
Merge the specified `Reservation`s into a new `Reservation`. The two
`Reservation`s being merged must have same properties.
:param reservation_order_id: Order Id of the reservation
:type reservation_order_id: str
:param sources: Format of the resource id should be
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type sources: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]]
:raises:
:class:`ErrorException<azure.mgmt.reservations.models.ErrorException>`
### Response:
#vtb
def merge(
        self, reservation_order_id, sources=None, custom_headers=None, raw=False, polling=True, **operation_config):
    # Duplicate occurrence (response copy) of merge above; literals stripped,
    # does not parse -- left byte-identical.
    raw_result = self._merge_initial(
        reservation_order_id=reservation_order_id,
        sources=sources,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    def get_long_running_output(response):
        deserialized = self._deserialize(, response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    lro_delay = operation_config.get(
        ,
        self.config.long_running_operation_timeout)
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
#vtb
def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
    """Hook for adjusting the call arguments of unsampled wrapped calls.

    Subclasses may override this, e.g. to inject traceparent headers into an
    outgoing HTTP request; the default implementation passes the arguments
    through untouched.
    """
    return (args, kwargs)
|
Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the
underlying http call for HTTP instrumentations.
:param module:
:param method:
:param wrapped:
:param instance:
:param args:
:param kwargs:
:param transaction:
:return:
|
### Input:
Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the
underlying http call for HTTP instrumentations.
:param module:
:param method:
:param wrapped:
:param instance:
:param args:
:param kwargs:
:param transaction:
:return:
### Response:
#vtb
def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
    """Hook called for unsampled wrapped calls; default is a pass-through."""
    return args, kwargs
|
#vtb
def get_start_and_end_time(self, ref=None):
    """Return the (start_time, end_time) epoch pair for a MonthDayDaterange.

    :param ref: reference time in seconds (None = now)
    """
    now = time.localtime(ref)
    # Year 0 means "unspecified": default both ends to the reference year.
    if self.syear == 0:
        self.syear = now.tm_year
    month_start_id = now.tm_mon
    day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
    start_time = get_start_of_day(self.syear, month_start_id, day_start)
    if self.eyear == 0:
        self.eyear = now.tm_year
    month_end_id = now.tm_mon
    day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
    end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    now_epoch = time.mktime(now)
    # Range wrapped backwards: pull the start into the previous month.
    if start_time > end_time:
        month_start_id -= 1
        if month_start_id < 1:
            month_start_id = 12
            self.syear -= 1
        day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)
    # Range already past: advance both ends one month (with year carry).
    if end_time < now_epoch:
        month_end_id += 1
        month_start_id += 1
        if month_end_id > 12:
            month_end_id = 1
            self.eyear += 1
        if month_start_id > 12:
            month_start_id = 1
            self.syear += 1
        day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)
        day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
        end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    return (start_time, end_time)
|
Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
|
### Input:
Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
### Response:
#vtb
def get_start_and_end_time(self, ref=None):
    """Duplicate (response copy): start/end epoch pair for a MonthDayDaterange."""
    now = time.localtime(ref)
    if self.syear == 0:
        self.syear = now.tm_year
    month_start_id = now.tm_mon
    day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
    start_time = get_start_of_day(self.syear, month_start_id, day_start)
    if self.eyear == 0:
        self.eyear = now.tm_year
    month_end_id = now.tm_mon
    day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
    end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    now_epoch = time.mktime(now)
    if start_time > end_time:
        month_start_id -= 1
        if month_start_id < 1:
            month_start_id = 12
            self.syear -= 1
        day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)
    if end_time < now_epoch:
        month_end_id += 1
        month_start_id += 1
        if month_end_id > 12:
            month_end_id = 1
            self.eyear += 1
        if month_start_id > 12:
            month_start_id = 1
            self.syear += 1
        day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
        start_time = get_start_of_day(self.syear, month_start_id, day_start)
        day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
        end_time = get_end_of_day(self.eyear, month_end_id, day_end)
    return (start_time, end_time)
|
#vtb
def get_previous_request(rid):
    # Return the last ceph broker request sent on relation `rid`, or None.
    # NOTE(review): several string literals (relation attribute name and
    # request_data keys, likely 'broker_req', 'api-version', 'request-id',
    # 'ops') were stripped; as written this does not parse.
    request = None
    broker_req = relation_get(attribute=, rid=rid,
                              unit=local_unit())
    if broker_req:
        request_data = json.loads(broker_req)
        request = CephBrokerRq(api_version=request_data[],
                               request_id=request_data[])
        request.set_ops(request_data[])
    return request
|
Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request
|
### Input:
Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request
### Response:
#vtb
def get_previous_request(rid):
    # Duplicate occurrence (response copy); stripped literals, does not parse.
    request = None
    broker_req = relation_get(attribute=, rid=rid,
                              unit=local_unit())
    if broker_req:
        request_data = json.loads(broker_req)
        request = CephBrokerRq(api_version=request_data[],
                               request_id=request_data[])
        request.set_ops(request_data[])
    return request
|
#vtb
def _validateurl(self, url):
parsed = urlparse(url)
path = parsed.path.strip("/")
if path:
parts = path.split("/")
url_types = ("admin", "manager", "rest")
if any(i in parts for i in url_types):
while parts.pop() not in url_types:
next
elif "services" in parts:
while parts.pop() not in "services":
next
path = "/".join(parts)
else:
path = "arcgis"
self._adminUrl = "%s://%s/%s/admin" % (parsed.scheme, parsed.netloc, path)
return "%s://%s/%s/rest/services" % (parsed.scheme, parsed.netloc, path)
|
assembles the server url
|
### Input:
assembles the server url
### Response:
#vtb
def _validateurl(self, url):
    """Assemble the server rest-services URL; also sets self._adminUrl."""
    parsed = urlparse(url)
    path = parsed.path.strip("/")
    if path:
        parts = path.split("/")
        url_types = ("admin", "manager", "rest")
        if any(i in parts for i in url_types):
            # Pop trailing components until the marker itself is removed.
            while parts.pop() not in url_types:
                next
        elif "services" in parts:
            # NOTE(review): `not in "services"` is a substring test, not an
            # equality test -- components like "vic" stop the loop early.
            # Likely intended `!= "services"`; verify before changing.
            while parts.pop() not in "services":
                next
        path = "/".join(parts)
    else:
        # Empty path defaults to the standard "arcgis" site.
        path = "arcgis"
    self._adminUrl = "%s://%s/%s/admin" % (parsed.scheme, parsed.netloc, path)
    return "%s://%s/%s/rest/services" % (parsed.scheme, parsed.netloc, path)
|
#vtb
def indicator(self, data):
    """Update the request URI for indicator retrieval.

    File data is normalized to its first hash before delegating to the
    parent implementation.
    """
    first_hash = self.get_first_hash(data)
    super(File, self).indicator(first_hash)
|
Update the request URI to include the Indicator for specific indicator retrieval.
Args:
data (string): The indicator value
|
### Input:
Update the request URI to include the Indicator for specific indicator retrieval.
Args:
data (string): The indicator value
### Response:
#vtb
def indicator(self, data):
    """Update the request URI to include the Indicator (first hash of data)."""
    data = self.get_first_hash(data)
    super(File, self).indicator(data)
|
#vtb
def verify_rank_integrity(self, tax_id, rank, parent_id, children):
    # Confirm that a node's rank is consistent with its parent and children:
    # the parent outranks the node, the node outranks every child.
    # NOTE(review): the error-message string literals were stripped
    # (`.format(rank)` with no receiver, bare `msg =`); as written this
    # does not parse.  Left byte-identical pending restoration.
    def _lower(n1, n2):
        # Rank order is the index position in self.ranks (earlier = lower).
        return self.ranks.index(n1) < self.ranks.index(n2)
    if rank not in self.ranks:
        raise TaxonIntegrityError(.format(rank))
    parent_rank = self.rank(parent_id)
    if not _lower(rank, parent_rank) and rank != self.NO_RANK:
        msg = (
        )
        msg = msg.format(tax_id, rank, parent_id, parent_rank)
        raise TaxonIntegrityError(msg)
    for child in children:
        if not _lower(self.rank(child), rank):
            msg =
            msg = msg.format(tax_id, child)
            raise TaxonIntegrityError(msg)
    return True
|
Confirm that for each node the parent ranks and children ranks are
coherent
|
### Input:
Confirm that for each node the parent ranks and children ranks are
coherent
### Response:
#vtb
def verify_rank_integrity(self, tax_id, rank, parent_id, children):
    # Duplicate occurrence (response copy); stripped literals, does not parse.
    def _lower(n1, n2):
        return self.ranks.index(n1) < self.ranks.index(n2)
    if rank not in self.ranks:
        raise TaxonIntegrityError(.format(rank))
    parent_rank = self.rank(parent_id)
    if not _lower(rank, parent_rank) and rank != self.NO_RANK:
        msg = (
        )
        msg = msg.format(tax_id, rank, parent_id, parent_rank)
        raise TaxonIntegrityError(msg)
    for child in children:
        if not _lower(self.rank(child), rank):
            msg =
            msg = msg.format(tax_id, child)
            raise TaxonIntegrityError(msg)
    return True
|
#vtb
def warn(self, message, *args, **kwargs):
    """Convenience alias: forward ``message`` to ``log`` at the warn level."""
    level = "warn"
    self.log(level, message, *args, **kwargs)
|
alias to message at warning level
|
### Input:
alias to message at warning level
### Response:
#vtb
def warn(self, message, *args, **kwargs):
    """Alias to message at warning level (delegates to ``self.log``)."""
    self.log("warn", message, *args, **kwargs)
|
#vtb
def to_iso(dt):
    """Format a date or datetime into an ISO-8601 string (pre-1900 safe).

    Returns None for values that are neither ``datetime`` nor ``date``.
    """
    # datetime must be tested first: datetime subclasses date, so the date
    # branch would otherwise swallow datetimes.
    if isinstance(dt, datetime):
        return to_iso_datetime(dt)
    if isinstance(dt, date):
        return to_iso_date(dt)
    return None
|
Format a date or datetime into an ISO-8601 string
Support dates before 1900.
|
### Input:
Format a date or datetime into an ISO-8601 string
Support dates before 1900.
### Response:
#vtb
def to_iso(dt):
    """Format a date or datetime into an ISO-8601 string; supports pre-1900."""
    # Order matters: datetime is a subclass of date.
    if isinstance(dt, datetime):
        return to_iso_datetime(dt)
    elif isinstance(dt, date):
        return to_iso_date(dt)
|
#vtb
def count_by_key_impl(sequence):
    """Count occurrences of each key in a sequence of (key, value) pairs.

    :param sequence: iterable of (key, value) pairs
    :return: view of (key, count) items
    """
    # Counter consumes the keys directly; six.viewitems was replaced with the
    # dict .items() view it resolves to on Python 3, dropping the six
    # dependency.
    counter = collections.Counter(key for key, _ in sequence)
    return counter.items()
|
Implementation for count_by_key_t
:param sequence: sequence of (key, value) pairs
:return: counts by key
|
### Input:
Implementation for count_by_key_t
:param sequence: sequence of (key, value) pairs
:return: counts by key
### Response:
#vtb
def count_by_key_impl(sequence):
    """Count by key: sequence of (key, value) pairs -> view of (key, count)."""
    counter = collections.Counter()
    for key, _ in sequence:
        counter[key] += 1
    # six.viewitems == dict.items() view on Python 3.
    return six.viewitems(counter)
|
#vtb
def autogender(self, api_token=None, genderize_all=False):
    # Autocomplete unique-identity gender via the genderize.io API.  Only
    # profiles without a gender are updated unless genderize_all is set.
    # NOTE(review): the gender_data dict keys and display template literals
    # were stripped (`{ : gender, : acc }` etc.); as written this does not
    # parse.  Also: `msg` has two %s placeholders but is given three
    # arguments (firstname, uuid, error) -- TypeError once restored.
    name_cache = {}
    no_gender = not genderize_all
    # First word of the full name is treated as the given name.
    pattern = re.compile(r"(^\w+)\s\w+")
    profiles = api.search_profiles(self.db, no_gender=no_gender)
    for profile in profiles:
        if not profile.name:
            continue
        name = profile.name.strip()
        m = pattern.match(name)
        if not m:
            continue
        firstname = m.group(1).lower()
        # Cache per first name to avoid repeated API calls.
        if firstname in name_cache:
            gender_data = name_cache[firstname]
        else:
            try:
                gender, acc = genderize(firstname, api_token)
            except (requests.exceptions.RequestException,
                    requests.exceptions.RetryError) as e:
                msg = "Skipping name (%s) due to a connection error. Error: %s"
                msg = msg % (firstname, profile.uuid, str(e))
                self.warning(msg)
                continue
            gender_data = {
                : gender,
                : acc
            }
            name_cache[firstname] = gender_data
        if not gender_data[]:
            continue
        try:
            api.edit_profile(self.db, profile.uuid, **gender_data)
            self.display(,
                         uuid=profile.uuid, name=profile.name,
                         gender_data=gender_data)
        except (NotFoundError, InvalidValueError) as e:
            self.error(str(e))
            return e.code
    return CMD_SUCCESS
|
Autocomplete gender information of unique identities.
Autocomplete unique identities gender using genderize.io
API. Only those unique identities without an assigned
gender will be updated unless `genderize_all` option is given.
|
### Input:
Autocomplete gender information of unique identities.
Autocomplete unique identities gender using genderize.io
API. Only those unique identities without an assigned
gender will be updated unless `genderize_all` option is given.
### Response:
#vtb
def autogender(self, api_token=None, genderize_all=False):
    # Duplicate occurrence (response copy); stripped literals, does not parse.
    name_cache = {}
    no_gender = not genderize_all
    pattern = re.compile(r"(^\w+)\s\w+")
    profiles = api.search_profiles(self.db, no_gender=no_gender)
    for profile in profiles:
        if not profile.name:
            continue
        name = profile.name.strip()
        m = pattern.match(name)
        if not m:
            continue
        firstname = m.group(1).lower()
        if firstname in name_cache:
            gender_data = name_cache[firstname]
        else:
            try:
                gender, acc = genderize(firstname, api_token)
            except (requests.exceptions.RequestException,
                    requests.exceptions.RetryError) as e:
                msg = "Skipping name (%s) due to a connection error. Error: %s"
                msg = msg % (firstname, profile.uuid, str(e))
                self.warning(msg)
                continue
            gender_data = {
                : gender,
                : acc
            }
            name_cache[firstname] = gender_data
        if not gender_data[]:
            continue
        try:
            api.edit_profile(self.db, profile.uuid, **gender_data)
            self.display(,
                         uuid=profile.uuid, name=profile.name,
                         gender_data=gender_data)
        except (NotFoundError, InvalidValueError) as e:
            self.error(str(e))
            return e.code
    return CMD_SUCCESS
|
#vtb
def reverse_deployments(deployments=None):
    # Reverse the deployment list, and the module/region lists inside each.
    # NOTE(review): the config-key literals were stripped (`for config in
    # [, ]` -- likely 'modules' and 'regions'); as written this does not
    # parse.  Left byte-identical.
    if deployments is None:
        deployments = []
    reversed_deployments = []
    for i in deployments[::-1]:
        # Deep copy so the caller's deployment dicts are not mutated.
        deployment = copy.deepcopy(i)
        for config in [, ]:
            if deployment.get(config):
                deployment[config] = deployment[config][::-1]
        reversed_deployments.append(deployment)
    return reversed_deployments
|
Reverse deployments and the modules/regions in them.
|
### Input:
Reverse deployments and the modules/regions in them.
### Response:
#vtb
def reverse_deployments(deployments=None):
    # Duplicate occurrence (response copy); stripped literals, does not parse.
    if deployments is None:
        deployments = []
    reversed_deployments = []
    for i in deployments[::-1]:
        deployment = copy.deepcopy(i)
        for config in [, ]:
            if deployment.get(config):
                deployment[config] = deployment[config][::-1]
        reversed_deployments.append(deployment)
    return reversed_deployments
|
#vtb
def apply(self, **kwexpr):
    """Register projection expressions keyed by alias; return self (fluent).

    Example: ``apply(square_root="sqrt(@foo)")`` adds a projection whose
    alias is ``square_root`` and whose expression is ``sqrt(@foo)``.
    """
    self._projections.extend([alias, expr] for alias, expr in kwexpr.items())
    return self
|
Specify one or more projection expressions to add to each result
### Parameters
- **kwexpr**: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")`
|
### Input:
Specify one or more projection expressions to add to each result
### Parameters
- **kwexpr**: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")`
### Response:
#vtb
def apply(self, **kwexpr):
    """Add projection expressions (alias=expression pairs); returns self."""
    for alias, expr in kwexpr.items():
        self._projections.append([alias, expr])
    return self
|
#vtb
def save(self, msg=None):
    # Commit this item's data to the repo (git objects are immutable, so
    # saving means adding a new item).
    # NOTE(review): the default commit-message format string was stripped
    # (`msg = % self.name`); as written this does not parse.
    if msg is None:
        msg = % self.name
    log.debug(msg)
    self.repo.addItem(self, msg)
|
Modify item data and commit to repo.
Git objects are immutable, to save means adding a new item
:param msg: Commit message.
|
### Input:
Modify item data and commit to repo.
Git objects are immutable, to save means adding a new item
:param msg: Commit message.
### Response:
#vtb
def save(self, msg=None):
    # Duplicate occurrence (response copy); stripped literal, does not parse.
    if msg is None:
        msg = % self.name
    log.debug(msg)
    self.repo.addItem(self, msg)
|
#vtb
def matrix(
        m, n, lst,
        m_text: list=None,
        n_text: list=None):
    """Render an m x n grid as text, marking the cells listed in ``lst``.

    m: number of rows; n: number of columns; lst: collection of 1-based
    (row, col) cells to mark with 'x'.  Optional row labels (``m_text``)
    and a column header line (``n_text``) are included when given.

    >>> print(matrix(2, 3, [(1, 1), (2, 3)]))
    |x| | |
    | | |x|
    <BLANKLINE>
    """
    lines = []
    if n_text:
        lines.append(" {}".format(" ".join(n_text)))
    for row in range(1, m + 1):
        prefix = "{:<4.4} ".format(m_text[row - 1]) if m_text else ""
        cells = "".join(
            "x|" if (row, col) in lst else " |" for col in range(1, n + 1)
        )
        lines.append(prefix + "|" + cells)
    return "\n".join(lines) + "\n" if lines else ""
|
m: row
n: column
lst: items
    >>> print(matrix(2, 3, [(1, 1), (2, 3)]))
|x| | |
| | |x|
|
### Input:
m: row
n: column
lst: items
    >>> print(matrix(2, 3, [(1, 1), (2, 3)]))
|x| | |
| | |x|
### Response:
#vtb
def matrix(
        m, n, lst,
        m_text: list=None,
        n_text: list=None):
    """Render an m x n grid, marking 1-based (row, col) cells from lst with 'x'."""
    fmt = ""
    # Optional column-header line.
    if n_text:
        fmt += " {}\n".format(" ".join(n_text))
    for i in range(1, m+1):
        # Optional fixed-width (4-char) row label.
        if m_text:
            fmt += "{:<4.4} ".format(m_text[i-1])
        fmt += "|"
        for j in range(1, n+1):
            if (i, j) in lst:
                fmt += "x|"
            else:
                fmt += " |"
        fmt += "\n"
    return fmt
|
#vtb
def get_queryset(self):
    """Check that ``queryset`` is defined and return the result of calling it.

    Raises ImproperlyConfigured when the class does not define ``queryset``.
    """
    if self.queryset is None:
        # BUG FIX: the original message lost its literal and used a
        # "%"-format string with no placeholder, raising TypeError at
        # raise time instead of the intended configuration error.
        raise ImproperlyConfigured(
            "%s must define 'queryset'" % self.__class__.__name__)
    return self.queryset()
|
Check that the queryset is defined and call it.
|
### Input:
Check that the queryset is defined and call it.
### Response:
#vtb
def get_queryset(self):
    """Check that the queryset is defined and call it."""
    if self.queryset is None:
        # NOTE(review): format string literal stripped; as written the
        # "%" has no placeholder and raises TypeError at raise time.
        raise ImproperlyConfigured(
            " must define " % self.__class__.__name__)
    return self.queryset()
|
#vtb
def check_domain(self, service_id, version_number, name):
    """Check the status of a domain's DNS record.

    Fetches the check endpoint for the given service/version/domain and
    wraps the response in a FastlyDomainCheck (per the API: domain details,
    current CNAME, and a properly-set-up boolean).
    """
    endpoint = "/service/%s/version/%d/domain/%s/check" % (
        service_id, version_number, name)
    return FastlyDomainCheck(self, self._fetch(endpoint))
|
Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly.
|
### Input:
Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly.
### Response:
#vtb
def check_domain(self, service_id, version_number, name):
    """Check a domain's DNS record status; returns a FastlyDomainCheck."""
    content = self._fetch("/service/%s/version/%d/domain/%s/check" % (service_id, version_number, name))
    return FastlyDomainCheck(self, content)
|
#vtb
async def get_pinstate_report(self, command):
    """Retrieve a Firmata pin-state report and send the reply on the websocket.

    command: [PIN]; the reply is a JSON message of the form
    {"method": "pin_state_reply", "params": <state or "Unknown">}.
    """
    pin = int(command[0])
    state = await self.core.get_pin_state(pin)
    # Falsy state (no report available) is reported as "Unknown".
    params = state if state else "Unknown"
    reply = json.dumps({"method": "pin_state_reply", "params": params})
    await self.websocket.send(reply)
|
This method retrieves a Firmata pin_state report for a pin..
See: http://firmata.org/wiki/Protocol#Pin_State_Query
:param command: {"method": "get_pin_state", "params": [PIN]}
:returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]}
|
### Input:
This method retrieves a Firmata pin_state report for a pin..
See: http://firmata.org/wiki/Protocol#Pin_State_Query
:param command: {"method": "get_pin_state", "params": [PIN]}
:returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]}
### Response:
#vtb
async def get_pinstate_report(self, command):
    """Retrieve a Firmata pin_state report for command[0] and reply via websocket."""
    pin = int(command[0])
    value = await self.core.get_pin_state(pin)
    if value:
        reply = json.dumps({"method": "pin_state_reply", "params": value})
    else:
        reply = json.dumps({"method": "pin_state_reply", "params": "Unknown"})
    await self.websocket.send(reply)
|
#vtb
def get_notebook_tab_title(notebook, page_num):
    """Return the title of the GTK notebook tab at ``page_num``.

    The title is derived from the tooltip text of the tab label's eventbox.
    """
    label_eventbox = notebook.get_tab_label(notebook.get_nth_page(page_num))
    return get_widget_title(label_eventbox.get_tooltip_text())
|
Helper function that gets a notebook's tab title given its page number
:param notebook: The GTK notebook
:param page_num: The page number of the tab, for which the title is required
:return: The title of the tab
|
### Input:
Helper function that gets a notebook's tab title given its page number
:param notebook: The GTK notebook
:param page_num: The page number of the tab, for which the title is required
:return: The title of the tab
### Response:
#vtb
def get_notebook_tab_title(notebook, page_num):
    """Get a notebook's tab title given its page number."""
    child = notebook.get_nth_page(page_num)
    tab_label_eventbox = notebook.get_tab_label(child)
    return get_widget_title(tab_label_eventbox.get_tooltip_text())
|
#vtb
def selecttabindex(self, window_name, object_name, tab_index):
    """Select a tab by 0-based index.

    ``window_name``/``object_name`` follow LDTP naming (full name, LDTP
    convention, or a Unix glob).  Returns 1 on success; raises
    LdtpServerException for an out-of-range index or a disabled tab.
    """
    children = self._get_tab_children(window_name, object_name)
    length = len(children)
    # BUG FIX: the original used `tab_index > length`, which accepted
    # tab_index == length and then crashed with IndexError on
    # children[tab_index] instead of raising the intended server error.
    if tab_index < 0 or tab_index >= length:
        raise LdtpServerException(u"Invalid tab index %s" % tab_index)
    tab_handle = children[tab_index]
    if not tab_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    tab_handle.Press()
    return 1
|
Select tab based on index.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param tab_index: tab to select
@type data: integer
@return: 1 on success.
@rtype: integer
|
### Input:
Select tab based on index.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param tab_index: tab to select
@type data: integer
@return: 1 on success.
@rtype: integer
### Response:
#vtb
def selecttabindex(self, window_name, object_name, tab_index):
    """Select a tab by index; returns 1 on success."""
    children = self._get_tab_children(window_name, object_name)
    length = len(children)
    # NOTE(review): `tab_index > length` admits tab_index == length, which
    # then raises IndexError below; likely intended `>= length`.
    if tab_index < 0 or tab_index > length:
        raise LdtpServerException(u"Invalid tab index %s" % tab_index)
    tab_handle = children[tab_index]
    if not tab_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    tab_handle.Press()
    return 1
|
#vtb
def make_name(self, reserved=[]):
    """Autogenerate :attr:`name` from :attr:`title`.

    If the candidate name is taken, the external ``make_name`` helper keeps
    suffixing numbers until an available name is found.

    :param reserved: names unavailable for use (read-only here, so the
        mutable default is harmless, though a tuple would be safer)
    """
    if self.title:
        # Persisted rows must exclude themselves from the uniqueness query;
        # new rows have no identity yet.
        if inspect(self).has_identity:
            def checkused(c):
                return bool(c in reserved or c in self.reserved_names
                            or self.__class__.query.filter(self.__class__.id != self.id).filter_by(name=c).notempty())
        else:
            def checkused(c):
                return bool(c in reserved or c in self.reserved_names
                            or self.__class__.query.filter_by(name=c).notempty())
        # no_autoflush: the uniqueness queries must not flush the pending
        # (possibly incomplete) row.
        with self.__class__.query.session.no_autoflush:
            self.name = six.text_type(make_name(self.title_for_name, maxlength=self.__name_length__, checkused=checkused))
|
Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already
in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2
until an available name is found.
:param reserved: List or set of reserved names unavailable for use
|
### Input:
Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already
in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2
until an available name is found.
:param reserved: List or set of reserved names unavailable for use
### Response:
#vtb
def make_name(self, reserved=[]):
    """Autogenerate :attr:`name` from :attr:`title`, avoiding reserved/used names."""
    if self.title:
        if inspect(self).has_identity:
            def checkused(c):
                return bool(c in reserved or c in self.reserved_names
                            or self.__class__.query.filter(self.__class__.id != self.id).filter_by(name=c).notempty())
        else:
            def checkused(c):
                return bool(c in reserved or c in self.reserved_names
                            or self.__class__.query.filter_by(name=c).notempty())
        with self.__class__.query.session.no_autoflush:
            self.name = six.text_type(make_name(self.title_for_name, maxlength=self.__name_length__, checkused=checkused))
|
#vtb
def accel_move_tab_left(self, *args):
    """Accelerator callback: move the current tab one position to the left."""
    current = self.get_notebook().get_current_page()
    # Page 0 cannot move further left; any other position swaps with its
    # left neighbour.
    if current:
        self.move_tab(current, current - 1)
    return True
|
Callback to move a tab to the left
|
### Input:
Callback to move a tab to the left
### Response:
#vtb
def accel_move_tab_left(self, *args):
    """Callback to move the current tab one position to the left."""
    pos = self.get_notebook().get_current_page()
    if pos != 0:
        self.move_tab(pos, pos - 1)
    return True
|
#vtb
def infos(self, type=None, failed=False):
    """Collect infos produced by all of this participant's nodes.

    Nodes are gathered regardless of failure state (failed="all"); ``type``
    filters infos by class, and ``failed`` filters the infos themselves,
    not the nodes.
    """
    collected = []
    for node in self.nodes(failed="all"):
        collected += node.infos(type=type, failed=failed)
    return collected
|
Get all infos created by the participants nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded, to include only failed nodes use ``failed=True``,
for all nodes use ``failed=all``. Note that failed filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned.
|
### Input:
Get all infos created by the participants nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded, to include only failed nodes use ``failed=True``,
for all nodes use ``failed=all``. Note that failed filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned.
### Response:
#vtb
def infos(self, type=None, failed=False):
    """Get all infos created by the participant's nodes (failed nodes included)."""
    nodes = self.nodes(failed="all")
    infos = []
    for n in nodes:
        infos.extend(n.infos(type=type, failed=failed))
    return infos
|
#vtb
def _find_usage_cloudtrail(self):
    # Calculate current usage for CloudTrail-related limits (trail count,
    # event selectors per trail, data resources per trail).
    # NOTE(review): the response/limit key literals were stripped (empty
    # `[]` subscripts and call arguments below, likely 'trailList',
    # 'HomeRegion', 'Name', 'EventSelectors', 'DataResources', etc.); as
    # written this does not parse.  Left byte-identical.
    trail_list = self.conn.describe_trails()[]
    trail_count = len(trail_list) if trail_list else 0
    for trail in trail_list:
        data_resource_count = 0
        # Event selectors can only be queried in the trail's home region.
        if self.conn._client_config.region_name == trail[]:
            response = self.conn.get_event_selectors(
                TrailName=trail[]
            )
            event_selectors = response[]
            for event_selector in event_selectors:
                data_resource_count += len(
                    event_selector.get(, [])
                )
            self.limits[]._add_current_usage(
                len(event_selectors),
                aws_type=,
                resource_id=trail[]
            )
            self.limits[]._add_current_usage(
                data_resource_count,
                aws_type=,
                resource_id=trail[]
            )
        else:
            logger.debug(
                % trail[]
            )
    self.limits[]._add_current_usage(
        trail_count,
        aws_type=self.aws_type
    )
|
Calculate current usage for CloudTrail related metrics
|
### Input:
Calculate current usage for CloudTrail related metrics
### Response:
#vtb
def _find_usage_cloudtrail(self):
    # Duplicate occurrence (response copy); stripped literals, does not parse.
    trail_list = self.conn.describe_trails()[]
    trail_count = len(trail_list) if trail_list else 0
    for trail in trail_list:
        data_resource_count = 0
        if self.conn._client_config.region_name == trail[]:
            response = self.conn.get_event_selectors(
                TrailName=trail[]
            )
            event_selectors = response[]
            for event_selector in event_selectors:
                data_resource_count += len(
                    event_selector.get(, [])
                )
            self.limits[]._add_current_usage(
                len(event_selectors),
                aws_type=,
                resource_id=trail[]
            )
            self.limits[]._add_current_usage(
                data_resource_count,
                aws_type=,
                resource_id=trail[]
            )
        else:
            logger.debug(
                % trail[]
            )
    self.limits[]._add_current_usage(
        trail_count,
        aws_type=self.aws_type
    )
|
#vtb
def postprocess_result(morphresult, trim_phonetic, trim_compound):
    # Postprocess vabamorf wrapper output into a {word, analyses} dict.
    # NOTE(review): the dict-key literals were stripped (likely 'text' and
    # 'analysis'); as written this does not parse.  Left byte-identical.
    word, analysis = morphresult
    return {
        : deconvert(word),
        : [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
    }
|
Postprocess vabamorf wrapper output.
|
### Input:
Postprocess vabamorf wrapper output.
### Response:
#vtb
def postprocess_result(morphresult, trim_phonetic, trim_compound):
    # Duplicate occurrence (response copy); stripped literals, does not parse.
    word, analysis = morphresult
    return {
        : deconvert(word),
        : [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
    }
|
#vtb
def handle_split(self, asset, ratio):
    """Apply a split of ``ratio`` to this position.

    Scales the share count by 1/ratio and the cost basis by ratio, keeping
    only whole shares; the value of the fractional remainder is returned
    as cash.
    """
    if self.asset != asset:
        raise Exception("updating split with the wrong asset!")
    # Post-split share count; only the integer part stays invested.
    post_split_count = self.amount / float(ratio)
    whole_shares = np.floor(post_split_count)
    fractional_shares = post_split_count - whole_shares
    adjusted_basis = round(self.cost_basis * ratio, 2)
    self.cost_basis = adjusted_basis
    self.amount = whole_shares
    leftover_cash = round(float(fractional_shares * adjusted_basis), 2)
    log.info("after split: " + str(self))
    log.info("returning cash: " + str(leftover_cash))
    return leftover_cash
|
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
|
### Input:
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
### Response:
#vtb
def handle_split(self, asset, ratio):
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
raw_share_count = self.amount / float(ratio)
full_share_count = np.floor(raw_share_count)
fractional_share_count = raw_share_count - full_share_count
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
return return_cash
|
#vtb
def flatten2d(d, key_as_tuple=True, delim=,
list_of_dicts=None):
return flattennd(d, 1, key_as_tuple, delim, list_of_dicts=list_of_dicts)
|
get nested dict as {key:dict,...},
where key is tuple/string of all-1 nested keys
NB: is same as flattennd(d,1,key_as_tuple,delim)
Parameters
----------
d : dict
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flatten2d(d))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flatten2d(d,key_as_tuple=False,delim=','))
{'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}}
|
### Input:
get nested dict as {key:dict,...},
where key is tuple/string of all-1 nested keys
NB: is same as flattennd(d,1,key_as_tuple,delim)
Parameters
----------
d : dict
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flatten2d(d))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flatten2d(d,key_as_tuple=False,delim=','))
{'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}}
### Response:
#vtb
def flatten2d(d, key_as_tuple=True, delim=,
list_of_dicts=None):
return flattennd(d, 1, key_as_tuple, delim, list_of_dicts=list_of_dicts)
|
#vtb
def save_point(self) -> str:
if self._current_mount is left:
msg = self.save_mount_offset()
self._current_mount = right
elif self._current_mount is types.Mount.LEFT:
msg = self.save_mount_offset()
self._current_mount = types.Mount.RIGHT
else:
pos = self._position()[:-1]
self.actual_points[self._current_point] = pos
log.debug("Saving {} for point {}".format(
pos, self._current_point))
msg = .format(
self._current_point, self.actual_points[self._current_point])
return msg
|
Indexes the measured data with the current point as a key and saves the
current position once the 'Enter' key is pressed to the 'actual points'
vector.
|
### Input:
Indexes the measured data with the current point as a key and saves the
current position once the 'Enter' key is pressed to the 'actual points'
vector.
### Response:
#vtb
def save_point(self) -> str:
if self._current_mount is left:
msg = self.save_mount_offset()
self._current_mount = right
elif self._current_mount is types.Mount.LEFT:
msg = self.save_mount_offset()
self._current_mount = types.Mount.RIGHT
else:
pos = self._position()[:-1]
self.actual_points[self._current_point] = pos
log.debug("Saving {} for point {}".format(
pos, self._current_point))
msg = .format(
self._current_point, self.actual_points[self._current_point])
return msg
|
#vtb
def create_storage_policy(policy_name, policy_dict, service_instance=None):
*policy name
log.trace(%s\, policy_name, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
policy_dict[] = policy_name
log.trace()
_apply_policy_config(policy_create_spec, policy_dict)
salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
return {: True}
|
Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict"
|
### Input:
Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict"
### Response:
#vtb
def create_storage_policy(policy_name, policy_dict, service_instance=None):
*policy name
log.trace(%s\, policy_name, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
policy_dict[] = policy_name
log.trace()
_apply_policy_config(policy_create_spec, policy_dict)
salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
return {: True}
|
#vtb
def extract_solvent_accessibility_dssp(in_dssp, path=True):
if path:
with open(in_dssp, ) as inf:
dssp_out = inf.read()
else:
dssp_out = in_dssp[:]
dssp_residues = []
go = False
for line in dssp_out.splitlines():
if go:
try:
res_num = int(line[5:10].strip())
chain = line[10:12].strip()
residue = line[13]
acc = int(line[35:38].strip())
dssp_residues.append([res_num, chain, residue, acc])
except ValueError:
pass
else:
if line[2] == :
go = True
pass
return dssp_residues
|
Uses DSSP to extract solvent accessibilty information on every residue.
Notes
-----
For more information on the solvent accessibility metrics used in dssp, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
In the dssp files value is labeled 'ACC'.
Parameters
----------
in_dssp : str
Path to DSSP file.
path : bool
Indicates if in_dssp is a path or a string.
Returns
-------
dssp_residues : list
Each internal list contains:
[0] int Residue number
[1] str Chain identifier
[2] str Residue type
[3] int dssp solvent accessibilty
|
### Input:
Uses DSSP to extract solvent accessibilty information on every residue.
Notes
-----
For more information on the solvent accessibility metrics used in dssp, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
In the dssp files value is labeled 'ACC'.
Parameters
----------
in_dssp : str
Path to DSSP file.
path : bool
Indicates if in_dssp is a path or a string.
Returns
-------
dssp_residues : list
Each internal list contains:
[0] int Residue number
[1] str Chain identifier
[2] str Residue type
[3] int dssp solvent accessibilty
### Response:
#vtb
def extract_solvent_accessibility_dssp(in_dssp, path=True):
if path:
with open(in_dssp, ) as inf:
dssp_out = inf.read()
else:
dssp_out = in_dssp[:]
dssp_residues = []
go = False
for line in dssp_out.splitlines():
if go:
try:
res_num = int(line[5:10].strip())
chain = line[10:12].strip()
residue = line[13]
acc = int(line[35:38].strip())
dssp_residues.append([res_num, chain, residue, acc])
except ValueError:
pass
else:
if line[2] == :
go = True
pass
return dssp_residues
|
#vtb
def allow(self, comment, content_object, request):
POST = urlencode({
"blog": settings.AKISMET_BLOG.encode("utf-8"),
"user_ip": comment.ip_address,
"user_agent": request.META.get(, "").
encode("utf-8"),
"referrer": request.META.get(, "").
encode("utf-8"),
"comment_author": comment.user_name.encode("utf-8"),
"comment_author_email": comment.user_email.encode("utf-8"),
"comment_author_url": comment.user_url.encode("utf-8"),
"comment_content": comment.comment.encode("utf-8")})
connection = HTTPConnection(AKISMET_URL, AKISMET_PORT)
connection.request("POST", AKISMET_PATH, POST,
{"User-Agent": AKISMET_USERAGENT,
"Content-type":"application/x-www-form-urlencoded"
})
response = connection.getresponse()
status, result = response.status, response.read()
if result == "false":
return True
elif result == "true" and settings.DISCARD_SPAM:
return False
elif result == "true":
comment.is_removed = True
comment.is_public = False
return True
else:
raise AkismetError(status, result)
|
Moderates comments.
|
### Input:
Moderates comments.
### Response:
#vtb
def allow(self, comment, content_object, request):
POST = urlencode({
"blog": settings.AKISMET_BLOG.encode("utf-8"),
"user_ip": comment.ip_address,
"user_agent": request.META.get(, "").
encode("utf-8"),
"referrer": request.META.get(, "").
encode("utf-8"),
"comment_author": comment.user_name.encode("utf-8"),
"comment_author_email": comment.user_email.encode("utf-8"),
"comment_author_url": comment.user_url.encode("utf-8"),
"comment_content": comment.comment.encode("utf-8")})
connection = HTTPConnection(AKISMET_URL, AKISMET_PORT)
connection.request("POST", AKISMET_PATH, POST,
{"User-Agent": AKISMET_USERAGENT,
"Content-type":"application/x-www-form-urlencoded"
})
response = connection.getresponse()
status, result = response.status, response.read()
if result == "false":
return True
elif result == "true" and settings.DISCARD_SPAM:
return False
elif result == "true":
comment.is_removed = True
comment.is_public = False
return True
else:
raise AkismetError(status, result)
|
#vtb
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
del weights_fn
predictions = tf.nn.sigmoid(logits)
labels = tf.argmax(labels, -1)
predictions = tf.argmax(predictions, -1)
_, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
return accuracy, tf.constant(1.0)
|
Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
|
### Input:
Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
### Response:
#vtb
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
del weights_fn
predictions = tf.nn.sigmoid(logits)
labels = tf.argmax(labels, -1)
predictions = tf.argmax(predictions, -1)
_, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
return accuracy, tf.constant(1.0)
|
#vtb
def patch_for_specialized_compiler():
if not in globals():
return
if unpatched:
return
unpatched.update(vars(distutils.msvc9compiler))
distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
|
Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
standalone compiler is not available.
|
### Input:
Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
standalone compiler is not available.
### Response:
#vtb
def patch_for_specialized_compiler():
if not in globals():
return
if unpatched:
return
unpatched.update(vars(distutils.msvc9compiler))
distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
|
#vtb
def close_tab(self):
if len(self.tab_pages) > 1:
del self.tab_pages[self.active_tab_index]
self.active_tab_index = max(0, self.active_tab_index - 1)
self._auto_close_new_empty_buffers()
|
Close active tab.
|
### Input:
Close active tab.
### Response:
#vtb
def close_tab(self):
if len(self.tab_pages) > 1:
del self.tab_pages[self.active_tab_index]
self.active_tab_index = max(0, self.active_tab_index - 1)
self._auto_close_new_empty_buffers()
|
#vtb
def s_l(l, alpha):
a2 = alpha**2
c_a = 0.547
s_l = np.sqrt(old_div(((c_a**(2. * l)) * a2), ((l + 1.) * (2. * l + 1.))))
return s_l
|
get sigma as a function of degree l from Constable and Parker (1988)
|
### Input:
get sigma as a function of degree l from Constable and Parker (1988)
### Response:
#vtb
def s_l(l, alpha):
a2 = alpha**2
c_a = 0.547
s_l = np.sqrt(old_div(((c_a**(2. * l)) * a2), ((l + 1.) * (2. * l + 1.))))
return s_l
|
#vtb
def interfaces(self):
self._ifaces = []
wifi_ctrl = wifiutil.WifiUtil()
for interface in wifi_ctrl.interfaces():
iface = Interface(interface)
self._ifaces.append(iface)
self._logger.info("Get interface: %s", iface.name())
if not self._ifaces:
self._logger.error("Can't get wifi interface")
return self._ifaces
|
Collect the available wlan interfaces.
|
### Input:
Collect the available wlan interfaces.
### Response:
#vtb
def interfaces(self):
self._ifaces = []
wifi_ctrl = wifiutil.WifiUtil()
for interface in wifi_ctrl.interfaces():
iface = Interface(interface)
self._ifaces.append(iface)
self._logger.info("Get interface: %s", iface.name())
if not self._ifaces:
self._logger.error("Can't get wifi interface")
return self._ifaces
|
#vtb
def get_hosts_by_explosion(self, hostgroups):
self.already_exploded = True
if self.rec_tag:
logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
return self.get_hosts()
self.rec_tag = True
hg_mbrs = self.get_hostgroup_members()
for hg_mbr in hg_mbrs:
hostgroup = hostgroups.find_by_name(hg_mbr.strip())
if hostgroup is not None:
value = hostgroup.get_hosts_by_explosion(hostgroups)
if value is not None:
self.add_members(value)
return self.get_hosts()
|
Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
:rtype: list
|
### Input:
Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
:rtype: list
### Response:
#vtb
def get_hosts_by_explosion(self, hostgroups):
self.already_exploded = True
if self.rec_tag:
logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
return self.get_hosts()
self.rec_tag = True
hg_mbrs = self.get_hostgroup_members()
for hg_mbr in hg_mbrs:
hostgroup = hostgroups.find_by_name(hg_mbr.strip())
if hostgroup is not None:
value = hostgroup.get_hosts_by_explosion(hostgroups)
if value is not None:
self.add_members(value)
return self.get_hosts()
|
#vtb
def user_has_group(user, group, superuser_skip=True):
if user.is_superuser and superuser_skip:
return True
return user.groups.filter(name=group).exists()
|
Check if a user is in a certaing group.
By default, the check is skipped for superusers.
|
### Input:
Check if a user is in a certaing group.
By default, the check is skipped for superusers.
### Response:
#vtb
def user_has_group(user, group, superuser_skip=True):
if user.is_superuser and superuser_skip:
return True
return user.groups.filter(name=group).exists()
|
#vtb
def attach(self, screen):
if self.listener is not None:
warnings.warn("As of version 0.6.0 the listener queue is "
"restricted to a single element. Existing "
"listener {0} will be replaced."
.format(self.listener), DeprecationWarning)
if self.strict:
for event in self.events:
if not hasattr(screen, event):
raise TypeError("{0} is missing {1}".format(screen, event))
self.listener = screen
self._parser = None
self._initialize_parser()
|
Adds a given screen to the listener queue.
:param pyte.screens.Screen screen: a screen to attach to.
|
### Input:
Adds a given screen to the listener queue.
:param pyte.screens.Screen screen: a screen to attach to.
### Response:
#vtb
def attach(self, screen):
if self.listener is not None:
warnings.warn("As of version 0.6.0 the listener queue is "
"restricted to a single element. Existing "
"listener {0} will be replaced."
.format(self.listener), DeprecationWarning)
if self.strict:
for event in self.events:
if not hasattr(screen, event):
raise TypeError("{0} is missing {1}".format(screen, event))
self.listener = screen
self._parser = None
self._initialize_parser()
|
#vtb
def from_json(self, fname):
with open(fname, ) as fp:
for row in json.load(fp):
nn = ServerInfo.from_dict(row)
self[str(nn)] = nn
|
Read contents of a CSV containing a list of servers.
|
### Input:
Read contents of a CSV containing a list of servers.
### Response:
#vtb
def from_json(self, fname):
with open(fname, ) as fp:
for row in json.load(fp):
nn = ServerInfo.from_dict(row)
self[str(nn)] = nn
|
#vtb
def bar_chart_mf(data, path_name):
N = len(data)
ind = np.arange(N)
width = 0.8
fig, ax = pyplot.subplots()
rects1 = ax.bar(ind, data, width, color=)
ax.set_ylabel()
ax.set_xticks(ind+width/2)
labs = [+str(i) for i in range(-N/2+1, N/2+1)]
ax.set_xticklabels(labs)
def autolabel(rects):
for rect in rects:
rect.get_height()
autolabel(rects1)
pyplot.savefig(path_name)
pyplot.close()
|
Make a bar chart for data on MF quantities.
|
### Input:
Make a bar chart for data on MF quantities.
### Response:
#vtb
def bar_chart_mf(data, path_name):
N = len(data)
ind = np.arange(N)
width = 0.8
fig, ax = pyplot.subplots()
rects1 = ax.bar(ind, data, width, color=)
ax.set_ylabel()
ax.set_xticks(ind+width/2)
labs = [+str(i) for i in range(-N/2+1, N/2+1)]
ax.set_xticklabels(labs)
def autolabel(rects):
for rect in rects:
rect.get_height()
autolabel(rects1)
pyplot.savefig(path_name)
pyplot.close()
|
#vtb
def add_state_editor(self, state_m):
state_identifier = self.get_state_identifier(state_m)
if state_identifier in self.closed_tabs:
state_editor_ctrl = self.closed_tabs[state_identifier][]
state_editor_view = state_editor_ctrl.view
handler_id = self.closed_tabs[state_identifier][]
source_code_view_is_dirty = self.closed_tabs[state_identifier][]
del self.closed_tabs[state_identifier]
else:
state_editor_view = StateEditorView()
if isinstance(state_m, LibraryStateModel):
state_editor_view[].set_current_page(
state_editor_view[].page_num(state_editor_view.page_dict["Data Linkage"]))
state_editor_ctrl = StateEditorController(state_m, state_editor_view)
self.add_controller(state_identifier, state_editor_ctrl)
if state_editor_ctrl.get_controller() and state_m.state.get_next_upper_library_root_state() is None:
handler_id = state_editor_view.source_view.get_buffer().connect(, self.script_text_changed,
state_m)
self.view.get_top_widget().connect(, state_editor_view.source_view.on_draw)
else:
handler_id = None
source_code_view_is_dirty = False
(tab, inner_label, sticky_button) = create_tab_header(, self.on_tab_close_clicked,
self.on_toggle_sticky_clicked, state_m)
set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty)
state_editor_view.get_top_widget().title_label = inner_label
state_editor_view.get_top_widget().sticky_button = sticky_button
page_content = state_editor_view.get_top_widget()
page_id = self.view.notebook.prepend_page(page_content, tab)
page = self.view.notebook.get_nth_page(page_id)
self.view.notebook.set_tab_reorderable(page, True)
page.show_all()
self.view.notebook.show()
self.tabs[state_identifier] = {: page, : state_m,
: state_editor_ctrl, : self.model.selected_state_machine_id,
: False,
: source_code_view_is_dirty,
: handler_id}
return page_id
|
Triggered whenever a state is selected.
:param state_m: The selected state model.
|
### Input:
Triggered whenever a state is selected.
:param state_m: The selected state model.
### Response:
#vtb
def add_state_editor(self, state_m):
state_identifier = self.get_state_identifier(state_m)
if state_identifier in self.closed_tabs:
state_editor_ctrl = self.closed_tabs[state_identifier][]
state_editor_view = state_editor_ctrl.view
handler_id = self.closed_tabs[state_identifier][]
source_code_view_is_dirty = self.closed_tabs[state_identifier][]
del self.closed_tabs[state_identifier]
else:
state_editor_view = StateEditorView()
if isinstance(state_m, LibraryStateModel):
state_editor_view[].set_current_page(
state_editor_view[].page_num(state_editor_view.page_dict["Data Linkage"]))
state_editor_ctrl = StateEditorController(state_m, state_editor_view)
self.add_controller(state_identifier, state_editor_ctrl)
if state_editor_ctrl.get_controller() and state_m.state.get_next_upper_library_root_state() is None:
handler_id = state_editor_view.source_view.get_buffer().connect(, self.script_text_changed,
state_m)
self.view.get_top_widget().connect(, state_editor_view.source_view.on_draw)
else:
handler_id = None
source_code_view_is_dirty = False
(tab, inner_label, sticky_button) = create_tab_header(, self.on_tab_close_clicked,
self.on_toggle_sticky_clicked, state_m)
set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty)
state_editor_view.get_top_widget().title_label = inner_label
state_editor_view.get_top_widget().sticky_button = sticky_button
page_content = state_editor_view.get_top_widget()
page_id = self.view.notebook.prepend_page(page_content, tab)
page = self.view.notebook.get_nth_page(page_id)
self.view.notebook.set_tab_reorderable(page, True)
page.show_all()
self.view.notebook.show()
self.tabs[state_identifier] = {: page, : state_m,
: state_editor_ctrl, : self.model.selected_state_machine_id,
: False,
: source_code_view_is_dirty,
: handler_id}
return page_id
|
#vtb
def Verify(self, public_key):
now = rdfvalue.RDFDatetime.Now().AsDatetime()
if now > self._value.not_valid_after:
raise VerificationError("Certificate expired!")
if now < self._value.not_valid_before:
raise VerificationError("Certificate not yet valid!")
public_key.Verify(
self._value.tbs_certificate_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
|
Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
|
### Input:
Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
### Response:
#vtb
def Verify(self, public_key):
now = rdfvalue.RDFDatetime.Now().AsDatetime()
if now > self._value.not_valid_after:
raise VerificationError("Certificate expired!")
if now < self._value.not_valid_before:
raise VerificationError("Certificate not yet valid!")
public_key.Verify(
self._value.tbs_certificate_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
|
#vtb
def _context(self):
src_stats = {
src: self._src_path_stats(src) for src in self.src_paths()
}
}
|
Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
}
|
### Input:
Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
}
### Response:
#vtb
def _context(self):
src_stats = {
src: self._src_path_stats(src) for src in self.src_paths()
}
}
|
#vtb
def get_edge_annotations(self, u, v, key: str) -> Optional[AnnotationsDict]:
return self._get_edge_attr(u, v, key, ANNOTATIONS)
|
Get the annotations for a given edge.
|
### Input:
Get the annotations for a given edge.
### Response:
#vtb
def get_edge_annotations(self, u, v, key: str) -> Optional[AnnotationsDict]:
return self._get_edge_attr(u, v, key, ANNOTATIONS)
|
#vtb
def write_c_string( self, value ):
self.file.write( value )
self.file.write( b )
|
Read a zero terminated (C style) string
|
### Input:
Read a zero terminated (C style) string
### Response:
#vtb
def write_c_string( self, value ):
self.file.write( value )
self.file.write( b )
|
#vtb
def read_namespace_status(self, name, **kwargs):
kwargs[] = True
if kwargs.get():
return self.read_namespace_status_with_http_info(name, **kwargs)
else:
(data) = self.read_namespace_status_with_http_info(name, **kwargs)
return data
|
read_namespace_status # noqa: E501
read status of the specified Namespace # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespace_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Namespace (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Namespace
If the method is called asynchronously,
returns the request thread.
|
### Input:
read_namespace_status # noqa: E501
read status of the specified Namespace # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespace_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Namespace (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Namespace
If the method is called asynchronously,
returns the request thread.
### Response:
#vtb
def read_namespace_status(self, name, **kwargs):
kwargs[] = True
if kwargs.get():
return self.read_namespace_status_with_http_info(name, **kwargs)
else:
(data) = self.read_namespace_status_with_http_info(name, **kwargs)
return data
|
#vtb
def from_ISO_8601(cls, date_string, time_string, tz_string):
if tz_string:
tz_offset = (int(tz_string[1:3]) * 60) + int(tz_string[3:])
if tz_string[0] == :
tz_offset = -tz_offset
else:
tz_offset = None
if time_string == :
time_string =
tz_offset = None
datetime_string = date_string + time_string[:13]
precision = min((len(datetime_string) - 2) // 2, 7)
if precision <= 0:
return None
fmt = .join((, , , , , , )[:precision])
return cls(
(datetime.strptime(datetime_string, fmt), precision, tz_offset))
|
Sufficiently general ISO 8601 parser.
Inputs must be in "basic" format, i.e. no '-' or ':' separators.
See https://en.wikipedia.org/wiki/ISO_8601
|
### Input:
Sufficiently general ISO 8601 parser.
Inputs must be in "basic" format, i.e. no '-' or ':' separators.
See https://en.wikipedia.org/wiki/ISO_8601
### Response:
#vtb
def from_ISO_8601(cls, date_string, time_string, tz_string):
if tz_string:
tz_offset = (int(tz_string[1:3]) * 60) + int(tz_string[3:])
if tz_string[0] == :
tz_offset = -tz_offset
else:
tz_offset = None
if time_string == :
time_string =
tz_offset = None
datetime_string = date_string + time_string[:13]
precision = min((len(datetime_string) - 2) // 2, 7)
if precision <= 0:
return None
fmt = .join((, , , , , , )[:precision])
return cls(
(datetime.strptime(datetime_string, fmt), precision, tz_offset))
|
#vtb
def parameters(self, params):
array = JavaArray(jobject=JavaArray.new_instance("weka.core.setupgenerator.AbstractParameter", len(params)))
for idx, obj in enumerate(params):
array[idx] = obj.jobject
javabridge.call(self.jobject, "setParameters", "([Lweka/core/setupgenerator/AbstractParameter;)V", array.jobject)
|
Sets the list of search parameters to use.
:param params: list of AbstractSearchParameter objects
:type params: list
|
### Input:
Sets the list of search parameters to use.
:param params: list of AbstractSearchParameter objects
:type params: list
### Response:
#vtb
def parameters(self, params):
array = JavaArray(jobject=JavaArray.new_instance("weka.core.setupgenerator.AbstractParameter", len(params)))
for idx, obj in enumerate(params):
array[idx] = obj.jobject
javabridge.call(self.jobject, "setParameters", "([Lweka/core/setupgenerator/AbstractParameter;)V", array.jobject)
|
#vtb
def O(self):
N = self.N
tig_to_idx = self.tig_to_idx
O = np.zeros((N, N), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
score = strandedness * md
O[ai, bi] = O[bi, ai] = score
return O
|
Pairwise strandedness matrix. Each cell contains whether i-th and j-th
contig are the same orientation +1, or opposite orientation -1.
|
### Input:
Pairwise strandedness matrix. Each cell contains whether i-th and j-th
contig are the same orientation +1, or opposite orientation -1.
### Response:
#vtb
def O(self):
N = self.N
tig_to_idx = self.tig_to_idx
O = np.zeros((N, N), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
score = strandedness * md
O[ai, bi] = O[bi, ai] = score
return O
|
#vtb
def add(self, rulefactory):
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
|
Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
|
### Input:
Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
### Response:
#vtb
def add(self, rulefactory):
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
|
#vtb
def comparison(self):
node = self.expr()
while self.token.nature in (
Nature.EQ,
Nature.NE,
Nature.LE,
Nature.GE,
Nature.LT,
Nature.GT,
):
token = self.token
if token.nature == Nature.EQ:
self._process(Nature.EQ)
elif token.nature == Nature.NE:
self._process(Nature.NE)
elif token.nature == Nature.LE:
self._process(Nature.LE)
elif token.nature == Nature.GE:
self._process(Nature.GE)
elif token.nature == Nature.LT:
self._process(Nature.LT)
elif token.nature == Nature.GT:
self._process(Nature.GT)
else:
self.error()
node = BinaryOperation(left=node, op=token, right=self.expr())
return node
|
comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)*
|
### Input:
comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)*
### Response:
#vtb
def comparison(self):
node = self.expr()
while self.token.nature in (
Nature.EQ,
Nature.NE,
Nature.LE,
Nature.GE,
Nature.LT,
Nature.GT,
):
token = self.token
if token.nature == Nature.EQ:
self._process(Nature.EQ)
elif token.nature == Nature.NE:
self._process(Nature.NE)
elif token.nature == Nature.LE:
self._process(Nature.LE)
elif token.nature == Nature.GE:
self._process(Nature.GE)
elif token.nature == Nature.LT:
self._process(Nature.LT)
elif token.nature == Nature.GT:
self._process(Nature.GT)
else:
self.error()
node = BinaryOperation(left=node, op=token, right=self.expr())
return node
|
#vtb
def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None):
path = {}
data = {}
params = {}
path["course_id"] = course_id
path["quiz_id"] = quiz_id
self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"])
data["quiz_report[report_type]"] = quiz_report_report_type
if quiz_report_includes_all_versions is not None:
data["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions
if include is not None:
self._validate_enum(include, ["file", "progress"])
data["include"] = include
self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, single_item=True)
|
Create a quiz report.
Create and return a new report for this quiz. If a previously
generated report matches the arguments and is still current (i.e.
there have been no new submissions), it will be returned.
*Responses*
* <code>400 Bad Request</code> if the specified report type is invalid
* <code>409 Conflict</code> if a quiz report of the specified type is already being
generated
|
### Input:
Create a quiz report.
Create and return a new report for this quiz. If a previously
generated report matches the arguments and is still current (i.e.
there have been no new submissions), it will be returned.
*Responses*
* <code>400 Bad Request</code> if the specified report type is invalid
* <code>409 Conflict</code> if a quiz report of the specified type is already being
generated
### Response:
#vtb
def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None):
path = {}
data = {}
params = {}
path["course_id"] = course_id
path["quiz_id"] = quiz_id
self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"])
data["quiz_report[report_type]"] = quiz_report_report_type
if quiz_report_includes_all_versions is not None:
data["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions
if include is not None:
self._validate_enum(include, ["file", "progress"])
data["include"] = include
self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, single_item=True)
|
#vtb
def getinfo(self):
try:
old_getinfo = AuthServiceProxy(self.__service_url, , self.__timeout, self.__conn, True)
res = old_getinfo()
if not in res:
return res
except JSONRPCException:
pass
network_info = self.getnetworkinfo()
blockchain_info = self.getblockchaininfo()
try:
wallet_info = self.getwalletinfo()
except:
wallet_info = {
: None,
: None,
: None,
: None,
: None,
}
res = {
: network_info[],
: network_info[],
: wallet_info[],
: wallet_info[],
: blockchain_info[],
: network_info[],
: network_info[],
: network_info[],
: blockchain_info[],
: blockchain_info[] == ,
: wallet_info[],
: wallet_info[],
: wallet_info[],
: network_info[],
}
for k in [, , ]:
if wallet_info.has_key(k):
res[k] = wallet_info[k]
return res
|
Backwards-compatibility for 0.14 and later
|
### Input:
Backwards-compatibility for 0.14 and later
### Response:
#vtb
def getinfo(self):
try:
old_getinfo = AuthServiceProxy(self.__service_url, , self.__timeout, self.__conn, True)
res = old_getinfo()
if not in res:
return res
except JSONRPCException:
pass
network_info = self.getnetworkinfo()
blockchain_info = self.getblockchaininfo()
try:
wallet_info = self.getwalletinfo()
except:
wallet_info = {
: None,
: None,
: None,
: None,
: None,
}
res = {
: network_info[],
: network_info[],
: wallet_info[],
: wallet_info[],
: blockchain_info[],
: network_info[],
: network_info[],
: network_info[],
: blockchain_info[],
: blockchain_info[] == ,
: wallet_info[],
: wallet_info[],
: wallet_info[],
: network_info[],
}
for k in [, , ]:
if wallet_info.has_key(k):
res[k] = wallet_info[k]
return res
|
#vtb
def _get_randomized_range(val,
provided_range,
default_range):
if val is None:
if provided_range is None:
return default_range
else:
return provided_range
else:
if provided_range is not None:
raise ValueError(
.format(str(val), str(provided_range)))
return [val]
|
Helper to initialize by either value or a range
Returns a range to randomize from
|
### Input:
Helper to initialize by either value or a range
Returns a range to randomize from
### Response:
#vtb
def _get_randomized_range(val,
provided_range,
default_range):
if val is None:
if provided_range is None:
return default_range
else:
return provided_range
else:
if provided_range is not None:
raise ValueError(
.format(str(val), str(provided_range)))
return [val]
|
#vtb
def find_newline(self, size=-1):
if size < 0:
return self._buffer.find(, self._offset)
return self._buffer.find(, self._offset, self._offset + size)
|
Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist.
|
### Input:
Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist.
### Response:
#vtb
def find_newline(self, size=-1):
if size < 0:
return self._buffer.find(, self._offset)
return self._buffer.find(, self._offset, self._offset + size)
|
#vtb
def get_rate(self, zipcode, city=None, state=None, multiple_rates=False):
data = self.make_request_data(zipcode, city, state)
r = requests.get(self.url, params=data)
resp = r.json()
return self.process_response(resp, multiple_rates)
|
Finds sales tax for given info.
Returns Decimal of the tax rate, e.g. 8.750.
|
### Input:
Finds sales tax for given info.
Returns Decimal of the tax rate, e.g. 8.750.
### Response:
#vtb
def get_rate(self, zipcode, city=None, state=None, multiple_rates=False):
data = self.make_request_data(zipcode, city, state)
r = requests.get(self.url, params=data)
resp = r.json()
return self.process_response(resp, multiple_rates)
|
#vtb
def load_data_old(self):
units = ""
if len(self.file_objects) == 1 and self.file_objects[0] is not None:
data = self.file_objects[0].variables[self.variable][self.forecast_hours]
if hasattr(self.file_objects[0].variables[self.variable], "units"):
units = self.file_objects[0].variables[self.variable].units
elif len(self.file_objects) > 1:
grid_shape = [len(self.file_objects), 1, 1]
for file_object in self.file_objects:
if file_object is not None:
if self.variable in file_object.variables.keys():
grid_shape = file_object.variables[self.variable].shape
elif self.variable.ljust(6, "_") in file_object.variables.keys():
grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
else:
print("{0} not found".format(self.variable))
raise KeyError
break
data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if self.variable in file_object.variables.keys():
var_name = self.variable
elif self.variable.ljust(6, "_") in file_object.variables.keys():
var_name = self.variable.ljust(6, "_")
else:
print("{0} not found".format(self.variable))
raise KeyError
data[f] = file_object.variables[var_name][0]
if units == "" and hasattr(file_object.variables[var_name], "units"):
units = file_object.variables[var_name].units
else:
data = None
return data, units
|
Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
from multiple files. Missing files are supported.
|
### Input:
Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
from multiple files. Missing files are supported.
### Response:
#vtb
def load_data_old(self):
units = ""
if len(self.file_objects) == 1 and self.file_objects[0] is not None:
data = self.file_objects[0].variables[self.variable][self.forecast_hours]
if hasattr(self.file_objects[0].variables[self.variable], "units"):
units = self.file_objects[0].variables[self.variable].units
elif len(self.file_objects) > 1:
grid_shape = [len(self.file_objects), 1, 1]
for file_object in self.file_objects:
if file_object is not None:
if self.variable in file_object.variables.keys():
grid_shape = file_object.variables[self.variable].shape
elif self.variable.ljust(6, "_") in file_object.variables.keys():
grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
else:
print("{0} not found".format(self.variable))
raise KeyError
break
data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if self.variable in file_object.variables.keys():
var_name = self.variable
elif self.variable.ljust(6, "_") in file_object.variables.keys():
var_name = self.variable.ljust(6, "_")
else:
print("{0} not found".format(self.variable))
raise KeyError
data[f] = file_object.variables[var_name][0]
if units == "" and hasattr(file_object.variables[var_name], "units"):
units = file_object.variables[var_name].units
else:
data = None
return data, units
|
#vtb
def to_range(obj, score=None, id=None, strand=None):
from jcvi.utils.range import Range
if score or id:
_score = score if score else obj.score
_id = id if id else obj.id
return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \
score=_score, id=_id)
elif strand:
return (obj.seqid, obj.start, obj.end, obj.strand)
return (obj.seqid, obj.start, obj.end)
|
Given a gffutils object, convert it to a range object
|
### Input:
Given a gffutils object, convert it to a range object
### Response:
#vtb
def to_range(obj, score=None, id=None, strand=None):
from jcvi.utils.range import Range
if score or id:
_score = score if score else obj.score
_id = id if id else obj.id
return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \
score=_score, id=_id)
elif strand:
return (obj.seqid, obj.start, obj.end, obj.strand)
return (obj.seqid, obj.start, obj.end)
|
#vtb
def parse(inp, format=None, encoding=, force_types=True):
proper_inp = inp
if hasattr(inp, ):
proper_inp = inp.read()
if isinstance(proper_inp, six.text_type):
proper_inp = proper_inp.encode(encoding)
fname = None
if hasattr(inp, ):
fname = inp.name
fmt = _get_format(format, fname, proper_inp)
proper_inp = six.BytesIO(proper_inp)
try:
res = _do_parse(proper_inp, fmt, encoding, force_types)
except Exception as e:
raise AnyMarkupError(e, traceback.format_exc())
if res is None:
res = {}
return res
|
Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
AnyMarkupError if a problem occurs while parsing or inp
|
### Input:
Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
AnyMarkupError if a problem occurs while parsing or inp
### Response:
#vtb
def parse(inp, format=None, encoding=, force_types=True):
proper_inp = inp
if hasattr(inp, ):
proper_inp = inp.read()
if isinstance(proper_inp, six.text_type):
proper_inp = proper_inp.encode(encoding)
fname = None
if hasattr(inp, ):
fname = inp.name
fmt = _get_format(format, fname, proper_inp)
proper_inp = six.BytesIO(proper_inp)
try:
res = _do_parse(proper_inp, fmt, encoding, force_types)
except Exception as e:
raise AnyMarkupError(e, traceback.format_exc())
if res is None:
res = {}
return res
|
#vtb
def list(self, size=1000, tree_depth=1):
if not tree_depth: return self._map_type()
return list(self.deque(size, tree_depth-1) for x in range(size))
|
Creates a random #list
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|[value1, value2]|
2=|[[value1, value2], [value1, value2]]|
-> random #list
|
### Input:
Creates a random #list
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|[value1, value2]|
2=|[[value1, value2], [value1, value2]]|
-> random #list
### Response:
#vtb
def list(self, size=1000, tree_depth=1):
if not tree_depth: return self._map_type()
return list(self.deque(size, tree_depth-1) for x in range(size))
|
#vtb
def printArray(self, node, printState: PrintState):
if (
self.nameMapper[node["name"]] not in printState.definedVars
and self.nameMapper[node["name"]] not in printState.globalVars
):
printState.definedVars += [self.nameMapper[node["name"]]]
assert int(node["count"]) > 0
printState.definedVars += [node["name"]]
varType = ""
if node["type"].upper() == "INTEGER":
varType = "int"
elif node["type"].upper() in ("DOUBLE", "REAL"):
varType = "float"
elif node["type"].upper() == "CHARACTER":
varType = "str"
elif node["isDevTypeVar"]:
varType = node["type"].lower() + "()"
assert varType != ""
self.pyStrings.append(f"{node[]} = Array({varType}, [")
for i in range(0, int(node["count"])):
loBound = node["low" + str(i + 1)]
upBound = node["up" + str(i + 1)]
dimensions = f"({loBound}, {upBound})"
if i < int(node["count"]) - 1:
self.pyStrings.append(f"{dimensions}, ")
else:
self.pyStrings.append(f"{dimensions}")
self.pyStrings.append("])")
if node["isDevTypeVar"]:
self.pyStrings.append(printState.sep)
upBound = node["up1"]
self.pyStrings.append(
f"for z in range(1, {upBound}+1):" + printState.sep
)
self.pyStrings.append(
f" obj = {node[]}()" + printState.sep
)
self.pyStrings.append(
f" {node[]}.set_(z, obj)" + printState.sep
)
|
Prints out the array declaration in a format of Array class
object declaration. 'arrayName = Array(Type, [bounds])'
|
### Input:
Prints out the array declaration in a format of Array class
object declaration. 'arrayName = Array(Type, [bounds])'
### Response:
#vtb
def printArray(self, node, printState: PrintState):
if (
self.nameMapper[node["name"]] not in printState.definedVars
and self.nameMapper[node["name"]] not in printState.globalVars
):
printState.definedVars += [self.nameMapper[node["name"]]]
assert int(node["count"]) > 0
printState.definedVars += [node["name"]]
varType = ""
if node["type"].upper() == "INTEGER":
varType = "int"
elif node["type"].upper() in ("DOUBLE", "REAL"):
varType = "float"
elif node["type"].upper() == "CHARACTER":
varType = "str"
elif node["isDevTypeVar"]:
varType = node["type"].lower() + "()"
assert varType != ""
self.pyStrings.append(f"{node[]} = Array({varType}, [")
for i in range(0, int(node["count"])):
loBound = node["low" + str(i + 1)]
upBound = node["up" + str(i + 1)]
dimensions = f"({loBound}, {upBound})"
if i < int(node["count"]) - 1:
self.pyStrings.append(f"{dimensions}, ")
else:
self.pyStrings.append(f"{dimensions}")
self.pyStrings.append("])")
if node["isDevTypeVar"]:
self.pyStrings.append(printState.sep)
upBound = node["up1"]
self.pyStrings.append(
f"for z in range(1, {upBound}+1):" + printState.sep
)
self.pyStrings.append(
f" obj = {node[]}()" + printState.sep
)
self.pyStrings.append(
f" {node[]}.set_(z, obj)" + printState.sep
)
|
#vtb
def zeros_like(array, dtype=None, keepmeta=True):
if keepmeta:
return xr.zeros_like(array, dtype)
else:
return dc.zeros(array.shape, dtype)
|
Create an array of zeros with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array filled with zeros.
|
### Input:
Create an array of zeros with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array filled with zeros.
### Response:
#vtb
def zeros_like(array, dtype=None, keepmeta=True):
if keepmeta:
return xr.zeros_like(array, dtype)
else:
return dc.zeros(array.shape, dtype)
|
#vtb
def to_representation(self, instance):
updated_course = copy.deepcopy(instance)
enterprise_customer_catalog = self.context[]
updated_course[] = enterprise_customer_catalog.get_course_enrollment_url(
updated_course[]
)
for course_run in updated_course[]:
course_run[] = enterprise_customer_catalog.get_course_run_enrollment_url(
course_run[]
)
return updated_course
|
Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data.
|
### Input:
Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data.
### Response:
#vtb
def to_representation(self, instance):
updated_course = copy.deepcopy(instance)
enterprise_customer_catalog = self.context[]
updated_course[] = enterprise_customer_catalog.get_course_enrollment_url(
updated_course[]
)
for course_run in updated_course[]:
course_run[] = enterprise_customer_catalog.get_course_run_enrollment_url(
course_run[]
)
return updated_course
|
#vtb
def change_cloud_password(
self,
current_password: str,
new_password: str,
new_hint: str = ""
) -> bool:
r = self.send(functions.account.GetPassword())
if not r.has_password:
raise ValueError("There is no cloud password to change")
r.new_algo.salt1 += os.urandom(32)
new_hash = btoi(compute_hash(r.new_algo, new_password))
new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p)))
self.send(
functions.account.UpdatePasswordSettings(
password=compute_check(r, current_password),
new_settings=types.account.PasswordInputSettings(
new_algo=r.new_algo,
new_password_hash=new_hash,
hint=new_hint
)
)
)
return True
|
Use this method to change your Two-Step Verification password (Cloud Password) with a new one.
Args:
current_password (``str``):
Your current password.
new_password (``str``):
Your new password.
new_hint (``str``, *optional*):
A new password hint.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` in case there is no cloud password to change.
|
### Input:
Use this method to change your Two-Step Verification password (Cloud Password) with a new one.
Args:
current_password (``str``):
Your current password.
new_password (``str``):
Your new password.
new_hint (``str``, *optional*):
A new password hint.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` in case there is no cloud password to change.
### Response:
#vtb
def change_cloud_password(
self,
current_password: str,
new_password: str,
new_hint: str = ""
) -> bool:
r = self.send(functions.account.GetPassword())
if not r.has_password:
raise ValueError("There is no cloud password to change")
r.new_algo.salt1 += os.urandom(32)
new_hash = btoi(compute_hash(r.new_algo, new_password))
new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p)))
self.send(
functions.account.UpdatePasswordSettings(
password=compute_check(r, current_password),
new_settings=types.account.PasswordInputSettings(
new_algo=r.new_algo,
new_password_hash=new_hash,
hint=new_hint
)
)
)
return True
|
#vtb
def sludge(self, column=None, value=None, **kwargs):
return self._resolve_call(, column, value, **kwargs)
|
Sludge information describes the volumn of sludge produced at a
facility, identification information on a sludge handler, and
classification/permitting information on a facility that handles
sludge, such as a pretreatment POTW.
>>> PCS().sludge('county_name', 'San Francisco')
|
### Input:
Sludge information describes the volumn of sludge produced at a
facility, identification information on a sludge handler, and
classification/permitting information on a facility that handles
sludge, such as a pretreatment POTW.
>>> PCS().sludge('county_name', 'San Francisco')
### Response:
#vtb
def sludge(self, column=None, value=None, **kwargs):
return self._resolve_call(, column, value, **kwargs)
|
#vtb
def __split_file(self):
if (self.__filename and os.access(self.__filename, os.R_OK)):
fhandle = None
try:
fhandle = os.open(self.__filename, os.O_RDONLY)
except OSError:
print(("CouldnSplit by day found'
self.__splitpointers.append(sfpos)
try:
sfpos = sarmap.find(PATTERN_MULTISPLIT, (sfpos + 1))
except ValueError:
print("ValueError on mmap.find()")
return True
if (self.__splitpointers):
return True
return False
|
Splits combined SAR output file (in ASCII format) in order to
extract info we need for it, in the format we want.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point
|
### Input:
Splits combined SAR output file (in ASCII format) in order to
extract info we need for it, in the format we want.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point
### Response:
#vtb
def __split_file(self):
if (self.__filename and os.access(self.__filename, os.R_OK)):
fhandle = None
try:
fhandle = os.open(self.__filename, os.O_RDONLY)
except OSError:
print(("CouldnSplit by day found'
self.__splitpointers.append(sfpos)
try:
sfpos = sarmap.find(PATTERN_MULTISPLIT, (sfpos + 1))
except ValueError:
print("ValueError on mmap.find()")
return True
if (self.__splitpointers):
return True
return False
|
#vtb
def __validate_datetime_string(self):
try:
try:
StrictVersion(self._value)
raise TypeConversionError(
"invalid datetime string: version string found {}".format(self._value)
)
except ValueError:
pass
except TypeError:
raise TypeConversionError("invalid datetime string: type={}".format(type(self._value)))
|
This will require validating version string (such as "3.3.5").
A version string could be converted to a datetime value if this
validation is not executed.
|
### Input:
This will require validating version string (such as "3.3.5").
A version string could be converted to a datetime value if this
validation is not executed.
### Response:
#vtb
def __validate_datetime_string(self):
try:
try:
StrictVersion(self._value)
raise TypeConversionError(
"invalid datetime string: version string found {}".format(self._value)
)
except ValueError:
pass
except TypeError:
raise TypeConversionError("invalid datetime string: type={}".format(type(self._value)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.