code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_all_keys(self):
"""Get all keys indexed.
:return: All keys
:rtype: list(str)
"""
all_keys = []
for keys in self._index.values():
all_keys.extend(keys)
return all_keys | Get all keys indexed.
:return: All keys
:rtype: list(str) | Below is the instruction that describes the task:
### Input:
Get all keys indexed.
:return: All keys
:rtype: list(str)
### Response:
def get_all_keys(self):
"""Get all keys indexed.
:return: All keys
:rtype: list(str)
"""
all_keys = []
for keys in self._index.values():
all_keys.extend(keys)
return all_keys |
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns) | Diff two CSV files, returning the patch which transforms one into the
other. | Below is the instruction that describes the task:
### Input:
Diff two CSV files, returning the patch which transforms one into the
other.
### Response:
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns) |
def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT):
"""Set or update data in the KV store without overwriting.
:param path: Path
:type path: str | unicode
:param secret: The contents of the "secret" dict will be stored and returned on read.
:type secret: dict
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the create_or_update_secret request.
:rtype: dict
"""
# First, do a read.
try:
current_secret_version = self.read_secret_version(
path=path,
mount_point=mount_point,
)
except exceptions.InvalidPath:
raise exceptions.InvalidPath('No value found at "{path}"; patch only works on existing data.'.format(path=path))
# Update existing secret dict.
patched_secret = current_secret_version['data']['data']
patched_secret.update(secret)
# Write back updated secret.
return self.create_or_update_secret(
path=path,
cas=current_secret_version['data']['metadata']['version'],
secret=patched_secret,
mount_point=mount_point,
) | Set or update data in the KV store without overwriting.
:param path: Path
:type path: str | unicode
:param secret: The contents of the "secret" dict will be stored and returned on read.
:type secret: dict
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the create_or_update_secret request.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Set or update data in the KV store without overwriting.
:param path: Path
:type path: str | unicode
:param secret: The contents of the "secret" dict will be stored and returned on read.
:type secret: dict
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the create_or_update_secret request.
:rtype: dict
### Response:
def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT):
"""Set or update data in the KV store without overwriting.
:param path: Path
:type path: str | unicode
:param secret: The contents of the "secret" dict will be stored and returned on read.
:type secret: dict
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the create_or_update_secret request.
:rtype: dict
"""
# First, do a read.
try:
current_secret_version = self.read_secret_version(
path=path,
mount_point=mount_point,
)
except exceptions.InvalidPath:
raise exceptions.InvalidPath('No value found at "{path}"; patch only works on existing data.'.format(path=path))
# Update existing secret dict.
patched_secret = current_secret_version['data']['data']
patched_secret.update(secret)
# Write back updated secret.
return self.create_or_update_secret(
path=path,
cas=current_secret_version['data']['metadata']['version'],
secret=patched_secret,
mount_point=mount_point,
) |
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return isinstance(string, _bool_type) or\
(isinstance(string, (_binary_type, _text_type))
and
string in ("True", "False")) | >>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False | Below is the instruction that describes the task:
### Input:
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
### Response:
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return isinstance(string, _bool_type) or\
(isinstance(string, (_binary_type, _text_type))
and
string in ("True", "False")) |
def build(self, *args, **kwargs):
"""
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
"""
# Create dirs for input, output and tmp data.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Write files file and input file.
if not self.files_file.exists:
self.files_file.write(self.filesfile_string)
self.input_file.write(self.make_input())
self.manager.write_jobfile(self) | Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist. | Below is the instruction that describes the task:
### Input:
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
### Response:
def build(self, *args, **kwargs):
"""
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
"""
# Create dirs for input, output and tmp data.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Write files file and input file.
if not self.files_file.exists:
self.files_file.write(self.filesfile_string)
self.input_file.write(self.make_input())
self.manager.write_jobfile(self) |
def examine(self):
"""Check if there are changes, if true, run the given task."""
if self._changes:
return self._changes.pop()
# clean filepath
self.filepath = None
delays = set([0])
for path in self._tasks:
item = self._tasks[path]
if self.is_changed(path, item['ignore']):
func = item['func']
func and func()
delay = item['delay']
if delay:
delays.add(delay)
if 'forever' in delays:
delay = 'forever'
else:
delay = max(delays)
return self.filepath, delay | Check if there are changes, if true, run the given task. | Below is the instruction that describes the task:
### Input:
Check if there are changes, if true, run the given task.
### Response:
def examine(self):
"""Check if there are changes, if true, run the given task."""
if self._changes:
return self._changes.pop()
# clean filepath
self.filepath = None
delays = set([0])
for path in self._tasks:
item = self._tasks[path]
if self.is_changed(path, item['ignore']):
func = item['func']
func and func()
delay = item['delay']
if delay:
delays.add(delay)
if 'forever' in delays:
delay = 'forever'
else:
delay = max(delays)
return self.filepath, delay |
def create_subnetwork(kwargs=None, call=None):
'''
... versionadded:: 2017.7.0
Create a GCE Subnetwork. Must specify name, cidr, network, and region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_subnetwork gce name=mysubnet network=mynet1 region=us-west1 cidr=10.0.0.0/24 description=optional
'''
if call != 'function':
raise SaltCloudSystemExit(
'The create_subnetwork function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'Must specify name of subnet.'
)
return False
if 'network' not in kwargs:
log.error(
'Must specify name of network to create subnet under.'
)
return False
if 'cidr' not in kwargs:
log.error(
'A network CIDR range must be specified when creating a subnet.'
)
return False
if 'region' not in kwargs:
log.error(
'A region must be specified when creating a subnetwork.'
)
return False
name = kwargs['name']
cidr = kwargs['cidr']
network = kwargs['network']
region = kwargs['region']
desc = kwargs.get('description', None)
conn = get_conn()
__utils__['cloud.fire_event'](
'event',
'create subnetwork',
'salt/cloud/subnet/creating',
args={
'name': name,
'network': network,
'cidr': cidr,
'region': region,
'description': desc
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
subnet = conn.ex_create_subnetwork(name, cidr, network, region, desc)
__utils__['cloud.fire_event'](
'event',
'created subnetwork',
'salt/cloud/subnet/created',
args={
'name': name,
'network': network,
'cidr': cidr,
'region': region,
'description': desc
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return _expand_item(subnet) | ... versionadded:: 2017.7.0
Create a GCE Subnetwork. Must specify name, cidr, network, and region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_subnetwork gce name=mysubnet network=mynet1 region=us-west1 cidr=10.0.0.0/24 description=optional | Below is the instruction that describes the task:
### Input:
... versionadded:: 2017.7.0
Create a GCE Subnetwork. Must specify name, cidr, network, and region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_subnetwork gce name=mysubnet network=mynet1 region=us-west1 cidr=10.0.0.0/24 description=optional
### Response:
def create_subnetwork(kwargs=None, call=None):
'''
... versionadded:: 2017.7.0
Create a GCE Subnetwork. Must specify name, cidr, network, and region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_subnetwork gce name=mysubnet network=mynet1 region=us-west1 cidr=10.0.0.0/24 description=optional
'''
if call != 'function':
raise SaltCloudSystemExit(
'The create_subnetwork function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'Must specify name of subnet.'
)
return False
if 'network' not in kwargs:
log.error(
'Must specify name of network to create subnet under.'
)
return False
if 'cidr' not in kwargs:
log.error(
'A network CIDR range must be specified when creating a subnet.'
)
return False
if 'region' not in kwargs:
log.error(
'A region must be specified when creating a subnetwork.'
)
return False
name = kwargs['name']
cidr = kwargs['cidr']
network = kwargs['network']
region = kwargs['region']
desc = kwargs.get('description', None)
conn = get_conn()
__utils__['cloud.fire_event'](
'event',
'create subnetwork',
'salt/cloud/subnet/creating',
args={
'name': name,
'network': network,
'cidr': cidr,
'region': region,
'description': desc
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
subnet = conn.ex_create_subnetwork(name, cidr, network, region, desc)
__utils__['cloud.fire_event'](
'event',
'created subnetwork',
'salt/cloud/subnet/created',
args={
'name': name,
'network': network,
'cidr': cidr,
'region': region,
'description': desc
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return _expand_item(subnet) |
def dependent_phone_numbers(self):
"""
Access the dependent_phone_numbers
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
"""
if self._dependent_phone_numbers is None:
self._dependent_phone_numbers = DependentPhoneNumberList(
self._version,
account_sid=self._solution['account_sid'],
address_sid=self._solution['sid'],
)
return self._dependent_phone_numbers | Access the dependent_phone_numbers
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList | Below is the instruction that describes the task:
### Input:
Access the dependent_phone_numbers
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
### Response:
def dependent_phone_numbers(self):
"""
Access the dependent_phone_numbers
:returns: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
:rtype: twilio.rest.api.v2010.account.address.dependent_phone_number.DependentPhoneNumberList
"""
if self._dependent_phone_numbers is None:
self._dependent_phone_numbers = DependentPhoneNumberList(
self._version,
account_sid=self._solution['account_sid'],
address_sid=self._solution['sid'],
)
return self._dependent_phone_numbers |
def format_help(help):
"""Formats the help string."""
help = help.replace("Options:", str(crayons.normal("Options:", bold=True)))
help = help.replace(
"Usage: pipenv", str("Usage: {0}".format(crayons.normal("pipenv", bold=True)))
)
help = help.replace(" check", str(crayons.red(" check", bold=True)))
help = help.replace(" clean", str(crayons.red(" clean", bold=True)))
help = help.replace(" graph", str(crayons.red(" graph", bold=True)))
help = help.replace(" install", str(crayons.magenta(" install", bold=True)))
help = help.replace(" lock", str(crayons.green(" lock", bold=True)))
help = help.replace(" open", str(crayons.red(" open", bold=True)))
help = help.replace(" run", str(crayons.yellow(" run", bold=True)))
help = help.replace(" shell", str(crayons.yellow(" shell", bold=True)))
help = help.replace(" sync", str(crayons.green(" sync", bold=True)))
help = help.replace(" uninstall", str(crayons.magenta(" uninstall", bold=True)))
help = help.replace(" update", str(crayons.green(" update", bold=True)))
additional_help = """
Usage Examples:
Create a new project using Python 3.7, specifically:
$ {1}
Remove project virtualenv (inferred from current directory):
$ {9}
Install all dependencies for a project (including dev):
$ {2}
Create a lockfile containing pre-releases:
$ {6}
Show a graph of your installed dependencies:
$ {4}
Check your installed dependencies for security vulnerabilities:
$ {7}
Install a local setup.py into your virtual environment/Pipfile:
$ {5}
Use a lower-level pip command:
$ {8}
Commands:""".format(
crayons.red("pipenv --three"),
crayons.red("pipenv --python 3.7"),
crayons.red("pipenv install --dev"),
crayons.red("pipenv lock"),
crayons.red("pipenv graph"),
crayons.red("pipenv install -e ."),
crayons.red("pipenv lock --pre"),
crayons.red("pipenv check"),
crayons.red("pipenv run pip freeze"),
crayons.red("pipenv --rm"),
)
help = help.replace("Commands:", additional_help)
return help | Formats the help string. | Below is the instruction that describes the task:
### Input:
Formats the help string.
### Response:
def format_help(help):
"""Formats the help string."""
help = help.replace("Options:", str(crayons.normal("Options:", bold=True)))
help = help.replace(
"Usage: pipenv", str("Usage: {0}".format(crayons.normal("pipenv", bold=True)))
)
help = help.replace(" check", str(crayons.red(" check", bold=True)))
help = help.replace(" clean", str(crayons.red(" clean", bold=True)))
help = help.replace(" graph", str(crayons.red(" graph", bold=True)))
help = help.replace(" install", str(crayons.magenta(" install", bold=True)))
help = help.replace(" lock", str(crayons.green(" lock", bold=True)))
help = help.replace(" open", str(crayons.red(" open", bold=True)))
help = help.replace(" run", str(crayons.yellow(" run", bold=True)))
help = help.replace(" shell", str(crayons.yellow(" shell", bold=True)))
help = help.replace(" sync", str(crayons.green(" sync", bold=True)))
help = help.replace(" uninstall", str(crayons.magenta(" uninstall", bold=True)))
help = help.replace(" update", str(crayons.green(" update", bold=True)))
additional_help = """
Usage Examples:
Create a new project using Python 3.7, specifically:
$ {1}
Remove project virtualenv (inferred from current directory):
$ {9}
Install all dependencies for a project (including dev):
$ {2}
Create a lockfile containing pre-releases:
$ {6}
Show a graph of your installed dependencies:
$ {4}
Check your installed dependencies for security vulnerabilities:
$ {7}
Install a local setup.py into your virtual environment/Pipfile:
$ {5}
Use a lower-level pip command:
$ {8}
Commands:""".format(
crayons.red("pipenv --three"),
crayons.red("pipenv --python 3.7"),
crayons.red("pipenv install --dev"),
crayons.red("pipenv lock"),
crayons.red("pipenv graph"),
crayons.red("pipenv install -e ."),
crayons.red("pipenv lock --pre"),
crayons.red("pipenv check"),
crayons.red("pipenv run pip freeze"),
crayons.red("pipenv --rm"),
)
help = help.replace("Commands:", additional_help)
return help |
def to_xml(self):
"""
Serialize all properties as XML
"""
ret = '<exif>'
for k in self.__dict__:
ret += '<%s>%s</%s>' % (k, self.__dict__[k], k)
ret += '</exif>'
return ret | Serialize all properties as XML | Below is the instruction that describes the task:
### Input:
Serialize all properties as XML
### Response:
def to_xml(self):
"""
Serialize all properties as XML
"""
ret = '<exif>'
for k in self.__dict__:
ret += '<%s>%s</%s>' % (k, self.__dict__[k], k)
ret += '</exif>'
return ret |
def getSerialDebugEnabled(self):
"""Returns True if enabled, False if disabled"""
command = '$GE'
settings = self.sendCommand(command)
flags = int(settings[2], 16)
return not (flags & 0x0080) | Returns True if enabled, False if disabled | Below is the instruction that describes the task:
### Input:
Returns True if enabled, False if disabled
### Response:
def getSerialDebugEnabled(self):
"""Returns True if enabled, False if disabled"""
command = '$GE'
settings = self.sendCommand(command)
flags = int(settings[2], 16)
return not (flags & 0x0080) |
def modify(self, modification, parameters):
"""
Apply a single modification to an MFD parameters.
Reflects the modification method and calls it passing ``parameters``
as keyword arguments. See also :attr:`MODIFICATIONS`.
Modifications can be applied one on top of another. The logic
of stacking modifications is up to a specific MFD implementation.
:param modification:
String name representing the type of modification.
:param parameters:
Dictionary of parameters needed for modification.
:raises ValueError:
If ``modification`` is missing from :attr:`MODIFICATIONS`.
"""
if modification not in self.MODIFICATIONS:
raise ValueError('Modification %s is not supported by %s' %
(modification, type(self).__name__))
meth = getattr(self, 'modify_%s' % modification)
meth(**parameters)
self.check_constraints() | Apply a single modification to an MFD parameters.
Reflects the modification method and calls it passing ``parameters``
as keyword arguments. See also :attr:`MODIFICATIONS`.
Modifications can be applied one on top of another. The logic
of stacking modifications is up to a specific MFD implementation.
:param modification:
String name representing the type of modification.
:param parameters:
Dictionary of parameters needed for modification.
:raises ValueError:
If ``modification`` is missing from :attr:`MODIFICATIONS`. | Below is the instruction that describes the task:
### Input:
Apply a single modification to an MFD parameters.
Reflects the modification method and calls it passing ``parameters``
as keyword arguments. See also :attr:`MODIFICATIONS`.
Modifications can be applied one on top of another. The logic
of stacking modifications is up to a specific MFD implementation.
:param modification:
String name representing the type of modification.
:param parameters:
Dictionary of parameters needed for modification.
:raises ValueError:
If ``modification`` is missing from :attr:`MODIFICATIONS`.
### Response:
def modify(self, modification, parameters):
"""
Apply a single modification to an MFD parameters.
Reflects the modification method and calls it passing ``parameters``
as keyword arguments. See also :attr:`MODIFICATIONS`.
Modifications can be applied one on top of another. The logic
of stacking modifications is up to a specific MFD implementation.
:param modification:
String name representing the type of modification.
:param parameters:
Dictionary of parameters needed for modification.
:raises ValueError:
If ``modification`` is missing from :attr:`MODIFICATIONS`.
"""
if modification not in self.MODIFICATIONS:
raise ValueError('Modification %s is not supported by %s' %
(modification, type(self).__name__))
meth = getattr(self, 'modify_%s' % modification)
meth(**parameters)
self.check_constraints() |
def structure_from_abivars(cls=None, *args, **kwargs):
"""
Build a :class:`Structure` object from a dictionary with ABINIT variables.
Args:
cls: Structure class to be instantiated. pymatgen.core.structure.Structure if cls is None
example:
al_structure = structure_from_abivars(
acell=3*[7.5],
rprim=[0.0, 0.5, 0.5,
0.5, 0.0, 0.5,
0.5, 0.5, 0.0],
typat=1,
xred=[0.0, 0.0, 0.0],
ntypat=1,
znucl=13,
)
`xred` can be replaced with `xcart` or `xangst`.
"""
kwargs.update(dict(*args))
d = kwargs
cls = Structure if cls is None else cls
#lattice = Lattice.from_dict(d, fmt="abivars")
lattice = lattice_from_abivars(**d)
coords, coords_are_cartesian = d.get("xred", None), False
if coords is None:
coords = d.get("xcart", None)
if coords is not None:
if "xangst" in d:
raise ValueError("xangst and xcart are mutually exclusive")
coords = ArrayWithUnit(coords, "bohr").to("ang")
else:
coords = d.get("xangst", None)
coords_are_cartesian = True
if coords is None:
raise ValueError("Cannot extract coordinates from:\n %s" % str(d))
coords = np.reshape(coords, (-1,3))
znucl_type, typat = d["znucl"], d["typat"]
if not isinstance(znucl_type, collections.abc.Iterable):
znucl_type = [znucl_type]
if not isinstance(typat, collections.abc.Iterable):
typat = [typat]
if len(typat) != len(coords):
raise ValueError("len(typat) != len(coords):\ntypat: %s\ncoords: %s" % (typat, coords))
# Note conversion to int and Fortran --> C indexing
typat = np.array(typat, dtype=int)
species = [znucl_type[typ-1] for typ in typat]
return cls(lattice, species, coords, validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=coords_are_cartesian) | Build a :class:`Structure` object from a dictionary with ABINIT variables.
Args:
cls: Structure class to be instantiated. pymatgen.core.structure.Structure if cls is None
example:
al_structure = structure_from_abivars(
acell=3*[7.5],
rprim=[0.0, 0.5, 0.5,
0.5, 0.0, 0.5,
0.5, 0.5, 0.0],
typat=1,
xred=[0.0, 0.0, 0.0],
ntypat=1,
znucl=13,
)
`xred` can be replaced with `xcart` or `xangst`. | Below is the instruction that describes the task:
### Input:
Build a :class:`Structure` object from a dictionary with ABINIT variables.
Args:
cls: Structure class to be instantiated. pymatgen.core.structure.Structure if cls is None
example:
al_structure = structure_from_abivars(
acell=3*[7.5],
rprim=[0.0, 0.5, 0.5,
0.5, 0.0, 0.5,
0.5, 0.5, 0.0],
typat=1,
xred=[0.0, 0.0, 0.0],
ntypat=1,
znucl=13,
)
`xred` can be replaced with `xcart` or `xangst`.
### Response:
def structure_from_abivars(cls=None, *args, **kwargs):
"""
Build a :class:`Structure` object from a dictionary with ABINIT variables.
Args:
cls: Structure class to be instantiated. pymatgen.core.structure.Structure if cls is None
example:
al_structure = structure_from_abivars(
acell=3*[7.5],
rprim=[0.0, 0.5, 0.5,
0.5, 0.0, 0.5,
0.5, 0.5, 0.0],
typat=1,
xred=[0.0, 0.0, 0.0],
ntypat=1,
znucl=13,
)
`xred` can be replaced with `xcart` or `xangst`.
"""
kwargs.update(dict(*args))
d = kwargs
cls = Structure if cls is None else cls
#lattice = Lattice.from_dict(d, fmt="abivars")
lattice = lattice_from_abivars(**d)
coords, coords_are_cartesian = d.get("xred", None), False
if coords is None:
coords = d.get("xcart", None)
if coords is not None:
if "xangst" in d:
raise ValueError("xangst and xcart are mutually exclusive")
coords = ArrayWithUnit(coords, "bohr").to("ang")
else:
coords = d.get("xangst", None)
coords_are_cartesian = True
if coords is None:
raise ValueError("Cannot extract coordinates from:\n %s" % str(d))
coords = np.reshape(coords, (-1,3))
znucl_type, typat = d["znucl"], d["typat"]
if not isinstance(znucl_type, collections.abc.Iterable):
znucl_type = [znucl_type]
if not isinstance(typat, collections.abc.Iterable):
typat = [typat]
if len(typat) != len(coords):
raise ValueError("len(typat) != len(coords):\ntypat: %s\ncoords: %s" % (typat, coords))
# Note conversion to int and Fortran --> C indexing
typat = np.array(typat, dtype=int)
species = [znucl_type[typ-1] for typ in typat]
return cls(lattice, species, coords, validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=coords_are_cartesian) |
def flip(a, axis):
"""Reverse the order of elements in an array along the given axis.
This function is a backport of `numpy.flip` introduced in NumPy 1.12.
See Also
--------
numpy.flip
"""
if not hasattr(a, 'ndim'):
a = np.asarray(a)
indexer = [slice(None)] * a.ndim
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError('axis={} is invalid for the {}-dimensional input '
'array'.format(axis, a.ndim))
return a[tuple(indexer)] | Reverse the order of elements in an array along the given axis.
This function is a backport of `numpy.flip` introduced in NumPy 1.12.
See Also
--------
numpy.flip | Below is the instruction that describes the task:
### Input:
Reverse the order of elements in an array along the given axis.
This function is a backport of `numpy.flip` introduced in NumPy 1.12.
See Also
--------
numpy.flip
### Response:
def flip(a, axis):
"""Reverse the order of elements in an array along the given axis.
This function is a backport of `numpy.flip` introduced in NumPy 1.12.
See Also
--------
numpy.flip
"""
if not hasattr(a, 'ndim'):
a = np.asarray(a)
indexer = [slice(None)] * a.ndim
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError('axis={} is invalid for the {}-dimensional input '
'array'.format(axis, a.ndim))
return a[tuple(indexer)] |
def _send_ffs(self, pid, n_blocks, fr):
"""Send a flood-fill start packet.
The cores and regions that the application should be loaded to will be
specified by a stream of flood-fill core select packets (FFCS).
"""
sfr = fr | (1 << 31)
self._send_scp(
255, 255, 0, SCPCommands.nearest_neighbour_packet,
(NNCommands.flood_fill_start << 24) | (pid << 16) |
(n_blocks << 8), 0x0, sfr
) | Send a flood-fill start packet.
The cores and regions that the application should be loaded to will be
specified by a stream of flood-fill core select packets (FFCS). | Below is the instruction that describes the task:
### Input:
Send a flood-fill start packet.
The cores and regions that the application should be loaded to will be
specified by a stream of flood-fill core select packets (FFCS).
### Response:
def _send_ffs(self, pid, n_blocks, fr):
"""Send a flood-fill start packet.
The cores and regions that the application should be loaded to will be
specified by a stream of flood-fill core select packets (FFCS).
"""
sfr = fr | (1 << 31)
self._send_scp(
255, 255, 0, SCPCommands.nearest_neighbour_packet,
(NNCommands.flood_fill_start << 24) | (pid << 16) |
(n_blocks << 8), 0x0, sfr
) |
def restore_session(self, cookie):
"""Establish databse connection using permanent session cookie"""
log.debug("Restoring session from cookie: {}".format(cookie))
opener = build_opener()
opener.addheaders.append(('Cookie', cookie))
self._opener = opener | Establish database connection using permanent session cookie | Below is the instruction that describes the task:
### Input:
Establish database connection using permanent session cookie
### Response:
def restore_session(self, cookie):
"""Establish databse connection using permanent session cookie"""
log.debug("Restoring session from cookie: {}".format(cookie))
opener = build_opener()
opener.addheaders.append(('Cookie', cookie))
self._opener = opener |
def format_help(self, ctx, formatter):
"""Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter) | Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog` | Below is the instruction that describes the task:
### Input:
Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
### Response:
def format_help(self, ctx, formatter):
"""Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter) |
def get_events_with_cluster_size(event_number, cluster_size, condition='cluster_size==1'):
'''Selects the events with cluster of a given cluster size.
Parameters
----------
event_number : numpy.array
cluster_size : numpy.array
condition : string
Returns
-------
numpy.array
'''
logging.debug("Calculate events with clusters with " + condition)
return np.unique(event_number[ne.evaluate(condition)]) | Selects the events with cluster of a given cluster size.
Parameters
----------
event_number : numpy.array
cluster_size : numpy.array
condition : string
Returns
-------
numpy.array | Below is the instruction that describes the task:
### Input:
Selects the events with cluster of a given cluster size.
Parameters
----------
event_number : numpy.array
cluster_size : numpy.array
condition : string
Returns
-------
numpy.array
### Response:
def get_events_with_cluster_size(event_number, cluster_size, condition='cluster_size==1'):
'''Selects the events with cluster of a given cluster size.
Parameters
----------
event_number : numpy.array
cluster_size : numpy.array
condition : string
Returns
-------
numpy.array
'''
logging.debug("Calculate events with clusters with " + condition)
return np.unique(event_number[ne.evaluate(condition)]) |
def trajectory_set_item(self, idx, value):
"""
:param self: mdtraj.Trajectory
:param idx: possible slices over frames,
:param value:
:return:
"""
import mdtraj
assert isinstance(self, mdtraj.Trajectory), type(self)
if not isinstance(value, mdtraj.Trajectory):
raise TypeError("value to assign is of incorrect type(%s). Should be mdtraj.Trajectory" % type(value))
idx = np.index_exp[idx]
frames, atoms = None, None
if isinstance(idx, (list, tuple)):
if len(idx) == 1:
frames, atoms = idx[0], slice(None, None, None)
if len(idx) == 2:
frames, atoms = idx[0], idx[1]
if len(idx) >= 3 or len(idx) == 0:
raise IndexError("invalid slice by %s" % idx)
self.xyz[frames, atoms] = value.xyz
self._time[frames] = value.time
self.unitcell_lengths[frames] = value.unitcell_lengths
self.unitcell_angles[frames] = value.unitcell_angles | :param self: mdtraj.Trajectory
:param idx: possible slices over frames,
:param value:
:return: | Below is the instruction that describes the task:
### Input:
:param self: mdtraj.Trajectory
:param idx: possible slices over frames,
:param value:
:return:
### Response:
def trajectory_set_item(self, idx, value):
"""
:param self: mdtraj.Trajectory
:param idx: possible slices over frames,
:param value:
:return:
"""
import mdtraj
assert isinstance(self, mdtraj.Trajectory), type(self)
if not isinstance(value, mdtraj.Trajectory):
raise TypeError("value to assign is of incorrect type(%s). Should be mdtraj.Trajectory" % type(value))
idx = np.index_exp[idx]
frames, atoms = None, None
if isinstance(idx, (list, tuple)):
if len(idx) == 1:
frames, atoms = idx[0], slice(None, None, None)
if len(idx) == 2:
frames, atoms = idx[0], idx[1]
if len(idx) >= 3 or len(idx) == 0:
raise IndexError("invalid slice by %s" % idx)
self.xyz[frames, atoms] = value.xyz
self._time[frames] = value.time
self.unitcell_lengths[frames] = value.unitcell_lengths
self.unitcell_angles[frames] = value.unitcell_angles |
def get_history(self):
"""get all msg_ids, ordered by time submitted."""
msg_ids = self._records.keys()
return sorted(msg_ids, key=lambda m: self._records[m]['submitted']) | get all msg_ids, ordered by time submitted. | Below is the instruction that describes the task:
### Input:
get all msg_ids, ordered by time submitted.
### Response:
def get_history(self):
"""get all msg_ids, ordered by time submitted."""
msg_ids = self._records.keys()
return sorted(msg_ids, key=lambda m: self._records[m]['submitted']) |
def update(self, params=None, client=c):
"""Push params to OnShape and synchronize the local copy
"""
uri = self.parent.uri
if not params or not self.res:
self.get_params()
return
d = self.payload
for k, v in params.items():
m = d["currentConfiguration"][self.parameter_map[k]]["message"]
if isinstance(v, bool) or isinstance(v, str):
m["value"] = v
else:
try:
m["expression"] = str(v)
except KeyError:
m["value"] = str(v)
res = client.update_configuration(uri.did, uri.wvm, uri.eid, json.dumps(d))
# If it was a good request, update config to be consistent with online.
if res.status_code == 200:
self.res = res | Push params to OnShape and synchronize the local copy | Below is the instruction that describes the task:
### Input:
Push params to OnShape and synchronize the local copy
### Response:
def update(self, params=None, client=c):
"""Push params to OnShape and synchronize the local copy
"""
uri = self.parent.uri
if not params or not self.res:
self.get_params()
return
d = self.payload
for k, v in params.items():
m = d["currentConfiguration"][self.parameter_map[k]]["message"]
if isinstance(v, bool) or isinstance(v, str):
m["value"] = v
else:
try:
m["expression"] = str(v)
except KeyError:
m["value"] = str(v)
res = client.update_configuration(uri.did, uri.wvm, uri.eid, json.dumps(d))
# If it was a good request, update config to be consistent with online.
if res.status_code == 200:
self.res = res |
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group | Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]] | Below is the instruction that describes the task:
### Input:
Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
### Response:
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group |
def add_f2str(self, dcts, srcfld, dstfld, dstfmt):
"""Add a namedtuple field of type string generated from an existing namedtuple field."""
# Example: f2str = objntmgr.add_f2str(dcts, "p_fdr_bh", "s_fdr_bh", "{:8.2e}")
# ntobj = self.get_ntobj()
# print(ntobj)
assert len(dcts) == len(self.nts)
for dct, ntgoea in zip(dcts, self.nts):
valorig = getattr(ntgoea, srcfld)
valstr = dstfmt.format(valorig)
dct[dstfld] = valstr | Add a namedtuple field of type string generated from an existing namedtuple field. | Below is the instruction that describes the task:
### Input:
Add a namedtuple field of type string generated from an existing namedtuple field.
### Response:
def add_f2str(self, dcts, srcfld, dstfld, dstfmt):
"""Add a namedtuple field of type string generated from an existing namedtuple field."""
# Example: f2str = objntmgr.add_f2str(dcts, "p_fdr_bh", "s_fdr_bh", "{:8.2e}")
# ntobj = self.get_ntobj()
# print(ntobj)
assert len(dcts) == len(self.nts)
for dct, ntgoea in zip(dcts, self.nts):
valorig = getattr(ntgoea, srcfld)
valstr = dstfmt.format(valorig)
dct[dstfld] = valstr |
def _get_model_meta_options(self) -> List[MetaOption]:
""""
Define fields allowed in the Meta class on end-user models, and the
behavior of each.
Custom ModelMetaOptions classes should override this method to customize
the options supported on class Meta of end-user models.
"""
# we can't use current_app to determine if we're under test, because it
# doesn't exist yet
testing_options = ([] if os.getenv('FLASK_ENV', False) != TEST
else [_TestingMetaOption()])
# when options require another option, its dependent must be listed.
# options in this list are not order-dependent, except where noted.
# all ColumnMetaOptions subclasses require PolymorphicMetaOption
return testing_options + [
AbstractMetaOption(), # required; must be first
LazyMappedMetaOption(),
RelationshipsMetaOption(), # requires lazy_mapped
TableMetaOption(),
MaterializedViewForMetaOption(),
PolymorphicMetaOption(), # must be first of all polymorphic options
PolymorphicOnColumnMetaOption(),
PolymorphicIdentityMetaOption(),
PolymorphicBaseTablenameMetaOption(),
PolymorphicJoinedPkColumnMetaOption(), # requires _BaseTablename
# must be after PolymorphicJoinedPkColumnMetaOption
PrimaryKeyColumnMetaOption(),
CreatedAtColumnMetaOption(),
UpdatedAtColumnMetaOption(),
] | Define fields allowed in the Meta class on end-user models, and the
behavior of each.
Custom ModelMetaOptions classes should override this method to customize
the options supported on class Meta of end-user models. | Below is the instruction that describes the task:
### Input:
Define fields allowed in the Meta class on end-user models, and the
behavior of each.
Custom ModelMetaOptions classes should override this method to customize
the options supported on class Meta of end-user models.
### Response:
def _get_model_meta_options(self) -> List[MetaOption]:
""""
Define fields allowed in the Meta class on end-user models, and the
behavior of each.
Custom ModelMetaOptions classes should override this method to customize
the options supported on class Meta of end-user models.
"""
# we can't use current_app to determine if we're under test, because it
# doesn't exist yet
testing_options = ([] if os.getenv('FLASK_ENV', False) != TEST
else [_TestingMetaOption()])
# when options require another option, its dependent must be listed.
# options in this list are not order-dependent, except where noted.
# all ColumnMetaOptions subclasses require PolymorphicMetaOption
return testing_options + [
AbstractMetaOption(), # required; must be first
LazyMappedMetaOption(),
RelationshipsMetaOption(), # requires lazy_mapped
TableMetaOption(),
MaterializedViewForMetaOption(),
PolymorphicMetaOption(), # must be first of all polymorphic options
PolymorphicOnColumnMetaOption(),
PolymorphicIdentityMetaOption(),
PolymorphicBaseTablenameMetaOption(),
PolymorphicJoinedPkColumnMetaOption(), # requires _BaseTablename
# must be after PolymorphicJoinedPkColumnMetaOption
PrimaryKeyColumnMetaOption(),
CreatedAtColumnMetaOption(),
UpdatedAtColumnMetaOption(),
] |
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir) | Run the doxygen make commands | Below is the instruction that describes the task:
### Input:
Run the doxygen make commands
### Response:
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir) |
def _create_axes(hist: HistogramBase, vega: dict, kwargs: dict):
"""Create axes in the figure."""
xlabel = kwargs.pop("xlabel", hist.axis_names[0])
ylabel = kwargs.pop("ylabel", hist.axis_names[1] if len(hist.axis_names) >= 2 else None)
vega["axes"] = [
{"orient": "bottom", "scale": "xscale", "title": xlabel},
{"orient": "left", "scale": "yscale", "title": ylabel}
] | Create axes in the figure. | Below is the instruction that describes the task:
### Input:
Create axes in the figure.
### Response:
def _create_axes(hist: HistogramBase, vega: dict, kwargs: dict):
"""Create axes in the figure."""
xlabel = kwargs.pop("xlabel", hist.axis_names[0])
ylabel = kwargs.pop("ylabel", hist.axis_names[1] if len(hist.axis_names) >= 2 else None)
vega["axes"] = [
{"orient": "bottom", "scale": "xscale", "title": xlabel},
{"orient": "left", "scale": "yscale", "title": ylabel}
] |
def setTreeDoc(self, tree):
"""update all nodes under the tree to point to the right
document """
if tree is None: tree__o = None
else: tree__o = tree._o
libxml2mod.xmlSetTreeDoc(tree__o, self._o) | update all nodes under the tree to point to the right
document | Below is the instruction that describes the task:
### Input:
update all nodes under the tree to point to the right
document
### Response:
def setTreeDoc(self, tree):
"""update all nodes under the tree to point to the right
document """
if tree is None: tree__o = None
else: tree__o = tree._o
libxml2mod.xmlSetTreeDoc(tree__o, self._o) |
def _finish_operation_action(self, action):
"""Finish an attempted operation.
Args:
action (ConnectionAction): the action object describing the result
of the operation that we are finishing
"""
success = action.data['success']
conn_key = action.data['id']
if self._get_connection_state(conn_key) != self.InProgress:
self._logger.error("Invalid finish_operation action on a connection whose state is not InProgress, conn_key=%s", str(conn_key))
return
# Cannot be None since we checked above to make sure it exists
data = self._get_connection(conn_key)
callback = data['callback']
conn_id = data['conn_id']
args = action.data['callback_args']
data['state'] = self.Idle
data['microstate'] = None
callback(conn_id, self.id, success, *args) | Finish an attempted operation.
Args:
action (ConnectionAction): the action object describing the result
of the operation that we are finishing | Below is the instruction that describes the task:
### Input:
Finish an attempted operation.
Args:
action (ConnectionAction): the action object describing the result
of the operation that we are finishing
### Response:
def _finish_operation_action(self, action):
"""Finish an attempted operation.
Args:
action (ConnectionAction): the action object describing the result
of the operation that we are finishing
"""
success = action.data['success']
conn_key = action.data['id']
if self._get_connection_state(conn_key) != self.InProgress:
self._logger.error("Invalid finish_operation action on a connection whose state is not InProgress, conn_key=%s", str(conn_key))
return
# Cannot be None since we checked above to make sure it exists
data = self._get_connection(conn_key)
callback = data['callback']
conn_id = data['conn_id']
args = action.data['callback_args']
data['state'] = self.Idle
data['microstate'] = None
callback(conn_id, self.id, success, *args) |
def _query(self):
'''
Query the broadcast for defined services.
:return:
'''
query = salt.utils.stringutils.to_bytes(
"{}{}".format(self.signature, time.time()))
self._socket.sendto(query, ('<broadcast>', self.port))
return query | Query the broadcast for defined services.
:return: | Below is the instruction that describes the task:
### Input:
Query the broadcast for defined services.
:return:
### Response:
def _query(self):
'''
Query the broadcast for defined services.
:return:
'''
query = salt.utils.stringutils.to_bytes(
"{}{}".format(self.signature, time.time()))
self._socket.sendto(query, ('<broadcast>', self.port))
return query |
def plot(self, **kwds):
r"""The plotting function for MOT fields."""
plo = self.lx.plot(dist_to_center=3, **kwds)
plo += self.ly.plot(dist_to_center=3, **kwds)
plo += self.lz.plot(dist_to_center=3, **kwds)
plo += self.lx_r.plot(dist_to_center=3, **kwds)
plo += self.ly_r.plot(dist_to_center=3, **kwds)
plo += self.lz_r.plot(dist_to_center=3, **kwds)
return plo | r"""The plotting function for MOT fields. | Below is the instruction that describes the task:
### Input:
r"""The plotting function for MOT fields.
### Response:
def plot(self, **kwds):
r"""The plotting function for MOT fields."""
plo = self.lx.plot(dist_to_center=3, **kwds)
plo += self.ly.plot(dist_to_center=3, **kwds)
plo += self.lz.plot(dist_to_center=3, **kwds)
plo += self.lx_r.plot(dist_to_center=3, **kwds)
plo += self.ly_r.plot(dist_to_center=3, **kwds)
plo += self.lz_r.plot(dist_to_center=3, **kwds)
return plo |
def find_inherited_key_completions(rootpath, root_env):
"""Return completion keys from INHERITED tuples.
Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple,
then enumerate the keys that are NOT in the rightmost tuple.
"""
tup = inflate_context_tuple(rootpath, root_env)
if isinstance(tup, runtime.CompositeTuple):
keys = set(k for t in tup.tuples[:-1] for k in t.keys())
return {n: get_completion(tup, n) for n in keys}
return {} | Return completion keys from INHERITED tuples.
Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple,
then enumerate the keys that are NOT in the rightmost tuple. | Below is the instruction that describes the task:
### Input:
Return completion keys from INHERITED tuples.
Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple,
then enumerate the keys that are NOT in the rightmost tuple.
### Response:
def find_inherited_key_completions(rootpath, root_env):
"""Return completion keys from INHERITED tuples.
Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple,
then enumerate the keys that are NOT in the rightmost tuple.
"""
tup = inflate_context_tuple(rootpath, root_env)
if isinstance(tup, runtime.CompositeTuple):
keys = set(k for t in tup.tuples[:-1] for k in t.keys())
return {n: get_completion(tup, n) for n in keys}
return {} |
def set_widgets(self):
"""Set widgets on the Aggregation Layer from Canvas tab."""
# The list is already populated in the previous step, but now we
# need to do it again in case we're back from the Keyword Wizard.
# First, preserve self.parent.layer before clearing the list
last_layer = self.parent.layer and self.parent.layer.id() or None
self.lblDescribeCanvasAggLayer.clear()
self.list_compatible_canvas_layers()
self.auto_select_one_item(self.lstCanvasAggLayers)
# Try to select the last_layer, if found:
if last_layer:
layers = []
for indx in range(self.lstCanvasAggLayers.count()):
item = self.lstCanvasAggLayers.item(indx)
layers += [item.data(QtCore.Qt.UserRole)]
if last_layer in layers:
self.lstCanvasAggLayers.setCurrentRow(layers.index(last_layer))
# Set icon
self.lblIconIFCWAggregationFromCanvas.setPixmap(QPixmap(None)) | Set widgets on the Aggregation Layer from Canvas tab. | Below is the instruction that describes the task:
### Input:
Set widgets on the Aggregation Layer from Canvas tab.
### Response:
def set_widgets(self):
"""Set widgets on the Aggregation Layer from Canvas tab."""
# The list is already populated in the previous step, but now we
# need to do it again in case we're back from the Keyword Wizard.
# First, preserve self.parent.layer before clearing the list
last_layer = self.parent.layer and self.parent.layer.id() or None
self.lblDescribeCanvasAggLayer.clear()
self.list_compatible_canvas_layers()
self.auto_select_one_item(self.lstCanvasAggLayers)
# Try to select the last_layer, if found:
if last_layer:
layers = []
for indx in range(self.lstCanvasAggLayers.count()):
item = self.lstCanvasAggLayers.item(indx)
layers += [item.data(QtCore.Qt.UserRole)]
if last_layer in layers:
self.lstCanvasAggLayers.setCurrentRow(layers.index(last_layer))
# Set icon
self.lblIconIFCWAggregationFromCanvas.setPixmap(QPixmap(None)) |
def _set_perms(self, perms):
"""
Sets the access permissions of the map.
:param perms: the new permissions.
"""
assert isinstance(perms, str) and len(perms) <= 3 and perms.strip() in ['', 'r', 'w', 'x', 'rw', 'r x', 'rx', 'rwx', 'wx', ]
self._perms = perms | Sets the access permissions of the map.
:param perms: the new permissions. | Below is the instruction that describes the task:
### Input:
Sets the access permissions of the map.
:param perms: the new permissions.
### Response:
def _set_perms(self, perms):
"""
Sets the access permissions of the map.
:param perms: the new permissions.
"""
assert isinstance(perms, str) and len(perms) <= 3 and perms.strip() in ['', 'r', 'w', 'x', 'rw', 'r x', 'rx', 'rwx', 'wx', ]
self._perms = perms |
def build(self, runtime, layers):
"""
Build the image if one is not already on the system that matches the runtime and layers
Parameters
----------
runtime str
Name of the Lambda runtime
layers list(samcli.commands.local.lib.provider.Layer)
List of layers
Returns
-------
str
The image to be used (REPOSITORY:TAG)
"""
base_image = "{}:{}".format(self._DOCKER_LAMBDA_REPO_NAME, runtime)
# Don't build the image if there are no layers.
if not layers:
LOG.debug("Skipping building an image since no layers were defined")
return base_image
downloaded_layers = self.layer_downloader.download_all(layers, self.force_image_build)
docker_image_version = self._generate_docker_image_version(downloaded_layers, runtime)
image_tag = "{}:{}".format(self._SAM_CLI_REPO_NAME, docker_image_version)
image_not_found = False
try:
self.docker_client.images.get(image_tag)
except docker.errors.ImageNotFound:
LOG.info("Image was not found.")
image_not_found = True
if self.force_image_build or \
image_not_found or \
any(layer.is_defined_within_template for layer in downloaded_layers):
LOG.info("Building image...")
self._build_image(base_image, image_tag, downloaded_layers)
return image_tag | Build the image if one is not already on the system that matches the runtime and layers
Parameters
----------
runtime str
Name of the Lambda runtime
layers list(samcli.commands.local.lib.provider.Layer)
List of layers
Returns
-------
str
The image to be used (REPOSITORY:TAG) | Below is the instruction that describes the task:
### Input:
Build the image if one is not already on the system that matches the runtime and layers
Parameters
----------
runtime str
Name of the Lambda runtime
layers list(samcli.commands.local.lib.provider.Layer)
List of layers
Returns
-------
str
The image to be used (REPOSITORY:TAG)
### Response:
def build(self, runtime, layers):
"""
Build the image if one is not already on the system that matches the runtime and layers
Parameters
----------
runtime str
Name of the Lambda runtime
layers list(samcli.commands.local.lib.provider.Layer)
List of layers
Returns
-------
str
The image to be used (REPOSITORY:TAG)
"""
base_image = "{}:{}".format(self._DOCKER_LAMBDA_REPO_NAME, runtime)
# Don't build the image if there are no layers.
if not layers:
LOG.debug("Skipping building an image since no layers were defined")
return base_image
downloaded_layers = self.layer_downloader.download_all(layers, self.force_image_build)
docker_image_version = self._generate_docker_image_version(downloaded_layers, runtime)
image_tag = "{}:{}".format(self._SAM_CLI_REPO_NAME, docker_image_version)
image_not_found = False
try:
self.docker_client.images.get(image_tag)
except docker.errors.ImageNotFound:
LOG.info("Image was not found.")
image_not_found = True
if self.force_image_build or \
image_not_found or \
any(layer.is_defined_within_template for layer in downloaded_layers):
LOG.info("Building image...")
self._build_image(base_image, image_tag, downloaded_layers)
return image_tag |
def efficiency(self, wavelengths=None):
"""Calculate :ref:`dimensionless efficiency <synphot-formula-qtlam>`.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, ``self.waveset`` is used.
Returns
-------
qtlam : `~astropy.units.quantity.Quantity`
Dimensionless efficiency.
"""
x = self._validate_wavelengths(wavelengths).value
y = self(x).value
qtlam = abs(np.trapz(y / x, x=x))
return qtlam * u.dimensionless_unscaled | Calculate :ref:`dimensionless efficiency <synphot-formula-qtlam>`.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, ``self.waveset`` is used.
Returns
-------
qtlam : `~astropy.units.quantity.Quantity`
Dimensionless efficiency. | Below is the instruction that describes the task:
### Input:
Calculate :ref:`dimensionless efficiency <synphot-formula-qtlam>`.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, ``self.waveset`` is used.
Returns
-------
qtlam : `~astropy.units.quantity.Quantity`
Dimensionless efficiency.
### Response:
def efficiency(self, wavelengths=None):
"""Calculate :ref:`dimensionless efficiency <synphot-formula-qtlam>`.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for sampling.
If not a Quantity, assumed to be in Angstrom.
If `None`, ``self.waveset`` is used.
Returns
-------
qtlam : `~astropy.units.quantity.Quantity`
Dimensionless efficiency.
"""
x = self._validate_wavelengths(wavelengths).value
y = self(x).value
qtlam = abs(np.trapz(y / x, x=x))
return qtlam * u.dimensionless_unscaled |
def lookup_name_slow(self, name):
"""Returns a struct if one exists"""
for index in xrange(self.__get_count_cached()):
if self.__get_name_cached(index) == name:
return self.__get_info_cached(index) | Returns a struct if one exists | Below is the instruction that describes the task:
### Input:
Returns a struct if one exists
### Response:
def lookup_name_slow(self, name):
"""Returns a struct if one exists"""
for index in xrange(self.__get_count_cached()):
if self.__get_name_cached(index) == name:
return self.__get_info_cached(index) |
def _query(self, data_category, resource_category, field, request, step, isotime=None):
"""
Request and return data from DataPoint RESTful API.
"""
rest_url = "/".join([HOST, data_category, resource_category, field, DATA_TYPE, request])
query_string = "?" + "&".join(["res=" + step, "time=" + isotime if isotime is not None else "", "key=" + self.key])
url = rest_url + query_string
page = url_lib.urlopen(url)
pg = page.read()
return pg | Request and return data from DataPoint RESTful API. | Below is the instruction that describes the task:
### Input:
Request and return data from DataPoint RESTful API.
### Response:
def _query(self, data_category, resource_category, field, request, step, isotime=None):
"""
Request and return data from DataPoint RESTful API.
"""
rest_url = "/".join([HOST, data_category, resource_category, field, DATA_TYPE, request])
query_string = "?" + "&".join(["res=" + step, "time=" + isotime if isotime is not None else "", "key=" + self.key])
url = rest_url + query_string
page = url_lib.urlopen(url)
pg = page.read()
return pg |
def set_related_method(self, resource, full_resource_url):
"""
Using reflection, generate the related method and return it.
"""
method_name = self.get_method_name(resource, 'get')
def get(self, **kwargs):
return self._call_api_single_related_resource(
resource, full_resource_url, method_name, **kwargs
)
def get_list(self, **kwargs):
return self._call_api_many_related_resources(
resource, full_resource_url, method_name, **kwargs
)
if isinstance(full_resource_url, list):
setattr(
self, method_name,
types.MethodType(get_list, self)
)
else:
setattr(
self, method_name,
types.MethodType(get, self)
) | Using reflection, generate the related method and return it. | Below is the instruction that describes the task:
### Input:
Using reflection, generate the related method and return it.
### Response:
def set_related_method(self, resource, full_resource_url):
"""
Using reflection, generate the related method and return it.
"""
method_name = self.get_method_name(resource, 'get')
def get(self, **kwargs):
return self._call_api_single_related_resource(
resource, full_resource_url, method_name, **kwargs
)
def get_list(self, **kwargs):
return self._call_api_many_related_resources(
resource, full_resource_url, method_name, **kwargs
)
if isinstance(full_resource_url, list):
setattr(
self, method_name,
types.MethodType(get_list, self)
)
else:
setattr(
self, method_name,
types.MethodType(get, self)
) |
def update(self, enabled=values.unset, webhook_url=values.unset,
webhook_method=values.unset):
"""
Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method
:returns: Updated ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance
"""
return self._proxy.update(enabled=enabled, webhook_url=webhook_url, webhook_method=webhook_method, ) | Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method
:returns: Updated ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance | Below is the instruction that describes the task:
### Input:
Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method
:returns: Updated ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance
### Response:
def update(self, enabled=values.unset, webhook_url=values.unset,
webhook_method=values.unset):
"""
Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method
:returns: Updated ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance
"""
return self._proxy.update(enabled=enabled, webhook_url=webhook_url, webhook_method=webhook_method, ) |
def detect_dangerous_timestamp(self, contract):
"""
Args:
contract (Contract)
Returns:
list((Function), (list (Node)))
"""
ret = []
for f in [f for f in contract.functions if f.contract == contract]:
nodes = self.timestamp(f)
if nodes:
ret.append((f, nodes))
return ret | Args:
contract (Contract)
Returns:
list((Function), (list (Node))) | Below is the instruction that describes the task:
### Input:
Args:
contract (Contract)
Returns:
list((Function), (list (Node)))
### Response:
def detect_dangerous_timestamp(self, contract):
"""
Args:
contract (Contract)
Returns:
list((Function), (list (Node)))
"""
ret = []
for f in [f for f in contract.functions if f.contract == contract]:
nodes = self.timestamp(f)
if nodes:
ret.append((f, nodes))
return ret |
def dimensions_from_subgroups(s):
"""
Given a sorted list of subgroups, return a string appropriate to provide as
a composite track's `dimensions` arg.
Parameters
----------
s : list of SubGroup objects (or anything with a `name` attribute)
"""
letters = 'XYABCDEFGHIJKLMNOPQRSTUVWZ'
return ' '.join(['dim{0}={1}'.format(dim, sg.name) for dim, sg in zip(letters, s)]) | Given a sorted list of subgroups, return a string appropriate to provide as
a composite track's `dimensions` arg.
Parameters
----------
s : list of SubGroup objects (or anything with a `name` attribute) | Below is the instruction that describes the task:
### Input:
Given a sorted list of subgroups, return a string appropriate to provide as
a composite track's `dimensions` arg.
Parameters
----------
s : list of SubGroup objects (or anything with a `name` attribute)
### Response:
def dimensions_from_subgroups(s):
"""
Given a sorted list of subgroups, return a string appropriate to provide as
a composite track's `dimensions` arg.
Parameters
----------
s : list of SubGroup objects (or anything with a `name` attribute)
"""
letters = 'XYABCDEFGHIJKLMNOPQRSTUVWZ'
return ' '.join(['dim{0}={1}'.format(dim, sg.name) for dim, sg in zip(letters, s)]) |
def _ann_store_annotations(self, item_with_annotations, node, overwrite=False):
"""Stores annotations into an hdf5 file."""
# If we overwrite delete all annotations first
if overwrite is True or overwrite == 'v_annotations':
annotated = self._all_get_from_attrs(node, HDF5StorageService.ANNOTATED)
if annotated:
current_attrs = node._v_attrs
for attr_name in current_attrs._v_attrnames:
if attr_name.startswith(HDF5StorageService.ANNOTATION_PREFIX):
delattr(current_attrs, attr_name)
delattr(current_attrs, HDF5StorageService.ANNOTATED)
self._hdf5file.flush()
# Only store annotations if the item has some
if not item_with_annotations.v_annotations.f_is_empty():
anno_dict = item_with_annotations.v_annotations._dict
current_attrs = node._v_attrs
changed = False
for field_name in anno_dict:
val = anno_dict[field_name]
field_name_with_prefix = HDF5StorageService.ANNOTATION_PREFIX + field_name
if field_name_with_prefix not in current_attrs:
# Only store *new* annotations, if they already exist on disk, skip storage
setattr(current_attrs, field_name_with_prefix, val)
changed = True
if changed:
setattr(current_attrs, HDF5StorageService.ANNOTATED, True)
self._hdf5file.flush() | Stores annotations into an hdf5 file. | Below is the the instruction that describes the task:
### Input:
Stores annotations into an hdf5 file.
### Response:
def _ann_store_annotations(self, item_with_annotations, node, overwrite=False):
"""Stores annotations into an hdf5 file."""
# If we overwrite delete all annotations first
if overwrite is True or overwrite == 'v_annotations':
annotated = self._all_get_from_attrs(node, HDF5StorageService.ANNOTATED)
if annotated:
current_attrs = node._v_attrs
for attr_name in current_attrs._v_attrnames:
if attr_name.startswith(HDF5StorageService.ANNOTATION_PREFIX):
delattr(current_attrs, attr_name)
delattr(current_attrs, HDF5StorageService.ANNOTATED)
self._hdf5file.flush()
# Only store annotations if the item has some
if not item_with_annotations.v_annotations.f_is_empty():
anno_dict = item_with_annotations.v_annotations._dict
current_attrs = node._v_attrs
changed = False
for field_name in anno_dict:
val = anno_dict[field_name]
field_name_with_prefix = HDF5StorageService.ANNOTATION_PREFIX + field_name
if field_name_with_prefix not in current_attrs:
# Only store *new* annotations, if they already exist on disk, skip storage
setattr(current_attrs, field_name_with_prefix, val)
changed = True
if changed:
setattr(current_attrs, HDF5StorageService.ANNOTATED, True)
self._hdf5file.flush() |
def get_nodes(self, jid, node=None):
"""
Request all nodes at a service or collection node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the collection node to query
:type node: :class:`str` or :data:`None`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The list of nodes at the service or collection node.
:rtype: :class:`~collections.abc.Sequence` of tuples consisting of the
node name and its description.
Request the nodes available at `jid`. If `node` is not :data:`None`,
the request returns the children of the :xep:`248` collection node
`node`. Make sure to check for the appropriate server feature first.
Return a list of tuples consisting of the node names and their
description (if available, otherwise :data:`None`). If more information
is needed, use :meth:`.DiscoClient.get_items` directly.
Only nodes whose :attr:`~.disco.xso.Item.jid` match the `jid` are
returned.
"""
response = yield from self._disco.query_items(
jid,
node=node,
)
result = []
for item in response.items:
if item.jid != jid:
continue
result.append((
item.node,
item.name,
))
return result | Request all nodes at a service or collection node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the collection node to query
:type node: :class:`str` or :data:`None`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The list of nodes at the service or collection node.
:rtype: :class:`~collections.abc.Sequence` of tuples consisting of the
node name and its description.
Request the nodes available at `jid`. If `node` is not :data:`None`,
the request returns the children of the :xep:`248` collection node
`node`. Make sure to check for the appropriate server feature first.
Return a list of tuples consisting of the node names and their
description (if available, otherwise :data:`None`). If more information
is needed, use :meth:`.DiscoClient.get_items` directly.
Only nodes whose :attr:`~.disco.xso.Item.jid` match the `jid` are
returned. | Below is the the instruction that describes the task:
### Input:
Request all nodes at a service or collection node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the collection node to query
:type node: :class:`str` or :data:`None`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The list of nodes at the service or collection node.
:rtype: :class:`~collections.abc.Sequence` of tuples consisting of the
node name and its description.
Request the nodes available at `jid`. If `node` is not :data:`None`,
the request returns the children of the :xep:`248` collection node
`node`. Make sure to check for the appropriate server feature first.
Return a list of tuples consisting of the node names and their
description (if available, otherwise :data:`None`). If more information
is needed, use :meth:`.DiscoClient.get_items` directly.
Only nodes whose :attr:`~.disco.xso.Item.jid` match the `jid` are
returned.
### Response:
def get_nodes(self, jid, node=None):
"""
Request all nodes at a service or collection node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the collection node to query
:type node: :class:`str` or :data:`None`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The list of nodes at the service or collection node.
:rtype: :class:`~collections.abc.Sequence` of tuples consisting of the
node name and its description.
Request the nodes available at `jid`. If `node` is not :data:`None`,
the request returns the children of the :xep:`248` collection node
`node`. Make sure to check for the appropriate server feature first.
Return a list of tuples consisting of the node names and their
description (if available, otherwise :data:`None`). If more information
is needed, use :meth:`.DiscoClient.get_items` directly.
Only nodes whose :attr:`~.disco.xso.Item.jid` match the `jid` are
returned.
"""
response = yield from self._disco.query_items(
jid,
node=node,
)
result = []
for item in response.items:
if item.jid != jid:
continue
result.append((
item.node,
item.name,
))
return result |
def high_limit(self) -> Optional[Union[int, float]]:
"""
High limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm high limit.
For LS-30 base units, this is either alarm OR control high limit,
as indicated by special_status ControlAlarm bit flag.
"""
return self._get_field_value(SpecialDevice.PROP_HIGH_LIMIT) | High limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm high limit.
For LS-30 base units, this is either alarm OR control high limit,
as indicated by special_status ControlAlarm bit flag. | Below is the the instruction that describes the task:
### Input:
High limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm high limit.
For LS-30 base units, this is either alarm OR control high limit,
as indicated by special_status ControlAlarm bit flag.
### Response:
def high_limit(self) -> Optional[Union[int, float]]:
"""
High limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm high limit.
For LS-30 base units, this is either alarm OR control high limit,
as indicated by special_status ControlAlarm bit flag.
"""
return self._get_field_value(SpecialDevice.PROP_HIGH_LIMIT) |
def file_to_list(path):
"""
Return the contents of a file as a list when given a path.
"""
if not os.path.exists(path):
ui.error(c.MESSAGES["path_missing"], path)
sys.exit(1)
with codecs.open(path, "r", "UTF-8") as contents:
lines = contents.read().splitlines()
return lines | Return the contents of a file as a list when given a path. | Below is the the instruction that describes the task:
### Input:
Return the contents of a file as a list when given a path.
### Response:
def file_to_list(path):
"""
Return the contents of a file as a list when given a path.
"""
if not os.path.exists(path):
ui.error(c.MESSAGES["path_missing"], path)
sys.exit(1)
with codecs.open(path, "r", "UTF-8") as contents:
lines = contents.read().splitlines()
return lines |
def create_customer(self, name, **kwargs):
"""
Creates a customer with a name. All other parameters are optional. They
are: `note`, `active_hourly_rate`, `hourly_rate`,
`hourly_rates_per_service`, and `archived`.
"""
data = self._wrap_dict("customer", kwargs)
data["customer"]["name"] = name
return self.post("/customers.json", data=data) | Creates a customer with a name. All other parameters are optional. They
are: `note`, `active_hourly_rate`, `hourly_rate`,
`hourly_rates_per_service`, and `archived`. | Below is the the instruction that describes the task:
### Input:
Creates a customer with a name. All other parameters are optional. They
are: `note`, `active_hourly_rate`, `hourly_rate`,
`hourly_rates_per_service`, and `archived`.
### Response:
def create_customer(self, name, **kwargs):
"""
Creates a customer with a name. All other parameters are optional. They
are: `note`, `active_hourly_rate`, `hourly_rate`,
`hourly_rates_per_service`, and `archived`.
"""
data = self._wrap_dict("customer", kwargs)
data["customer"]["name"] = name
return self.post("/customers.json", data=data) |
def _validate_return_fields(self, return_fields):
"""
:param return_fields: tuple
:raises: pybomb.exceptions.InvalidReturnFieldException
"""
for return_field in return_fields:
if return_field not in self.RESPONSE_FIELD_MAP:
raise InvalidReturnFieldException(
'"{0}" is an invalid return field'.format(return_field)
) | :param return_fields: tuple
:raises: pybomb.exceptions.InvalidReturnFieldException | Below is the the instruction that describes the task:
### Input:
:param return_fields: tuple
:raises: pybomb.exceptions.InvalidReturnFieldException
### Response:
def _validate_return_fields(self, return_fields):
"""
:param return_fields: tuple
:raises: pybomb.exceptions.InvalidReturnFieldException
"""
for return_field in return_fields:
if return_field not in self.RESPONSE_FIELD_MAP:
raise InvalidReturnFieldException(
'"{0}" is an invalid return field'.format(return_field)
) |
def isclose(a, b, atol):
"""
A replacement for np.isclose that does fewer checks
and validation and as a result is roughly 4x faster.
Note that this is used in tight loops, and as such
a and b MUST be np.ndarray, not list or "array-like"
Parameters
----------
a : np.ndarray
To be compared
b : np.ndarray
To be compared
atol : float
Acceptable distance between `a` and `b` to be "close"
Returns
-----------
close : np.ndarray, bool
Per- element closeness
"""
diff = a - b
close = np.logical_and(diff > -atol, diff < atol)
return close | A replacement for np.isclose that does fewer checks
and validation and as a result is roughly 4x faster.
Note that this is used in tight loops, and as such
a and b MUST be np.ndarray, not list or "array-like"
Parameters
----------
a : np.ndarray
To be compared
b : np.ndarray
To be compared
atol : float
Acceptable distance between `a` and `b` to be "close"
Returns
-----------
close : np.ndarray, bool
Per- element closeness | Below is the the instruction that describes the task:
### Input:
A replacement for np.isclose that does fewer checks
and validation and as a result is roughly 4x faster.
Note that this is used in tight loops, and as such
a and b MUST be np.ndarray, not list or "array-like"
Parameters
----------
a : np.ndarray
To be compared
b : np.ndarray
To be compared
atol : float
Acceptable distance between `a` and `b` to be "close"
Returns
-----------
close : np.ndarray, bool
Per- element closeness
### Response:
def isclose(a, b, atol):
"""
A replacement for np.isclose that does fewer checks
and validation and as a result is roughly 4x faster.
Note that this is used in tight loops, and as such
a and b MUST be np.ndarray, not list or "array-like"
Parameters
----------
a : np.ndarray
To be compared
b : np.ndarray
To be compared
atol : float
Acceptable distance between `a` and `b` to be "close"
Returns
-----------
close : np.ndarray, bool
Per- element closeness
"""
diff = a - b
close = np.logical_and(diff > -atol, diff < atol)
return close |
def add_datetime(dataframe, timestamp_key='UNIXTIME'):
"""Add an additional DATETIME column with standar datetime format.
This currently manipulates the incoming DataFrame!
"""
def convert_data(timestamp):
return datetime.fromtimestamp(float(timestamp) / 1e3, UTC_TZ)
try:
log.debug("Adding DATETIME column to the data")
converted = dataframe[timestamp_key].apply(convert_data)
dataframe['DATETIME'] = converted
except KeyError:
log.warning("Could not add DATETIME column") | Add an additional DATETIME column with standar datetime format.
This currently manipulates the incoming DataFrame! | Below is the the instruction that describes the task:
### Input:
Add an additional DATETIME column with standar datetime format.
This currently manipulates the incoming DataFrame!
### Response:
def add_datetime(dataframe, timestamp_key='UNIXTIME'):
"""Add an additional DATETIME column with standar datetime format.
This currently manipulates the incoming DataFrame!
"""
def convert_data(timestamp):
return datetime.fromtimestamp(float(timestamp) / 1e3, UTC_TZ)
try:
log.debug("Adding DATETIME column to the data")
converted = dataframe[timestamp_key].apply(convert_data)
dataframe['DATETIME'] = converted
except KeyError:
log.warning("Could not add DATETIME column") |
def get_dataset(self, key, info):
"""Load a dataset."""
logger.debug('Reading %s.', key.name)
variable = self.nc[key.name]
info.update(variable.attrs)
info.update(key.to_dict())
info.update(dict(platform_name=self.platform_name,
sensor=self.sensor))
variable.attrs = info
return variable | Load a dataset. | Below is the the instruction that describes the task:
### Input:
Load a dataset.
### Response:
def get_dataset(self, key, info):
"""Load a dataset."""
logger.debug('Reading %s.', key.name)
variable = self.nc[key.name]
info.update(variable.attrs)
info.update(key.to_dict())
info.update(dict(platform_name=self.platform_name,
sensor=self.sensor))
variable.attrs = info
return variable |
def list(self, count=30, order='user_ptime', asc=False, show_dir=True,
natsort=True):
"""
List files of the associated directory to this task.
:param int count: number of entries to be listed
:param str order: originally named `o`
:param bool asc: whether in ascending order
:param bool show_dir: whether to show directories
"""
return self.directory.list(count, order, asc, show_dir, natsort) | List files of the associated directory to this task.
:param int count: number of entries to be listed
:param str order: originally named `o`
:param bool asc: whether in ascending order
:param bool show_dir: whether to show directories | Below is the the instruction that describes the task:
### Input:
List files of the associated directory to this task.
:param int count: number of entries to be listed
:param str order: originally named `o`
:param bool asc: whether in ascending order
:param bool show_dir: whether to show directories
### Response:
def list(self, count=30, order='user_ptime', asc=False, show_dir=True,
natsort=True):
"""
List files of the associated directory to this task.
:param int count: number of entries to be listed
:param str order: originally named `o`
:param bool asc: whether in ascending order
:param bool show_dir: whether to show directories
"""
return self.directory.list(count, order, asc, show_dir, natsort) |
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to']) | \
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen. | Below is the the instruction that describes the task:
### Input:
\
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
### Response:
def upgrade_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Upgrade the database to the latest version.
Usage:
pld-ugprade [options]
Options:
--from=<v> Upgrade from a specific version, overriding
the version stored in the database.
--to=<v> Upgrade to a specific version instead of the
latest version.
-h --help Show this screen.
"""
arguments = docopt(upgrade_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
upgrade(from_version=arguments['--from'], to_version=arguments['--to']) |
def price_and_currency(self):
"""Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Float representation of price.
2. ISO Currency code (string).
"""
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.CurrencyCode')
else:
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.CurrencyCode')
else:
price = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.Amount')
currency = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.CurrencyCode')
if price:
return float(price) / 100, currency
else:
return None, None | Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Float representation of price.
2. ISO Currency code (string). | Below is the the instruction that describes the task:
### Input:
Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Float representation of price.
2. ISO Currency code (string).
### Response:
def price_and_currency(self):
"""Get Offer Price and Currency.
Return price according to the following process:
* If product has a sale return Sales Price, otherwise,
* Return Price, otherwise,
* Return lowest offer price, otherwise,
* Return None.
:return:
A tuple containing:
1. Float representation of price.
2. ISO Currency code (string).
"""
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.SalePrice.CurrencyCode')
else:
price = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.Amount')
if price:
currency = self._safe_get_element_text(
'Offers.Offer.OfferListing.Price.CurrencyCode')
else:
price = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.Amount')
currency = self._safe_get_element_text(
'OfferSummary.LowestNewPrice.CurrencyCode')
if price:
return float(price) / 100, currency
else:
return None, None |
def run_interrupted(self):
"""
Runs custodian in a interuppted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
start = datetime.datetime.now()
try:
cwd = os.getcwd()
v = sys.version.replace("\n", " ")
logger.info("Custodian started in singleshot mode at {} in {}."
.format(start, cwd))
logger.info("Custodian running on Python version {}".format(v))
# load run log
if os.path.exists(Custodian.LOG_FILE):
self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)
if len(self.run_log) == 0:
# starting up an initial job - setup input and quit
job_n = 0
job = self.jobs[job_n]
logger.info("Setting up job no. 1 ({}) ".format(job.name))
job.setup()
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
return len(self.jobs)
else:
# Continuing after running calculation
job_n = self.run_log[-1]['job_n']
job = self.jobs[job_n]
# If we had to fix errors from a previous run, insert clean log
# dict
if len(self.run_log[-1]['corrections']) > 0:
logger.info("Reran {}.run due to fixable errors".format(
job.name))
# check error handlers
logger.info("Checking error handlers for {}.run".format(
job.name))
if self._do_check(self.handlers):
logger.info("Failed validation based on error handlers")
# raise an error for an unrecoverable error
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
logger.info("Corrected input based on error handlers")
# Return with more jobs to run if recoverable error caught
# and corrected for
return len(self.jobs) - job_n
# check validators
logger.info("Checking validator for {}.run".format(job.name))
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
logger.info("Failed validation based on validator")
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
logger.info("Postprocessing for {}.run".format(job.name))
job.postprocess()
# IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN
# VALIDATED
if len(self.jobs) == (job_n + 1):
self.finished = True
return 0
# Setup next job_n
job_n += 1
job = self.jobs[job_n]
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
job.setup()
return len(self.jobs) - job_n
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.finished and self.gzipped_output:
gzip_dir(".") | Runs custodian in a interuppted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached | Below is the the instruction that describes the task:
### Input:
Runs custodian in a interuppted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
### Response:
def run_interrupted(self):
"""
Runs custodian in a interuppted mode, which sets up and
validates jobs but doesn't run the executable
Returns:
number of remaining jobs
Raises:
ValidationError: if a job fails validation
ReturnCodeError: if the process has a return code different from 0
NonRecoverableError: if an unrecoverable occurs
MaxCorrectionsPerJobError: if max_errors_per_job is reached
MaxCorrectionsError: if max_errors is reached
MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
"""
start = datetime.datetime.now()
try:
cwd = os.getcwd()
v = sys.version.replace("\n", " ")
logger.info("Custodian started in singleshot mode at {} in {}."
.format(start, cwd))
logger.info("Custodian running on Python version {}".format(v))
# load run log
if os.path.exists(Custodian.LOG_FILE):
self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)
if len(self.run_log) == 0:
# starting up an initial job - setup input and quit
job_n = 0
job = self.jobs[job_n]
logger.info("Setting up job no. 1 ({}) ".format(job.name))
job.setup()
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
return len(self.jobs)
else:
# Continuing after running calculation
job_n = self.run_log[-1]['job_n']
job = self.jobs[job_n]
# If we had to fix errors from a previous run, insert clean log
# dict
if len(self.run_log[-1]['corrections']) > 0:
logger.info("Reran {}.run due to fixable errors".format(
job.name))
# check error handlers
logger.info("Checking error handlers for {}.run".format(
job.name))
if self._do_check(self.handlers):
logger.info("Failed validation based on error handlers")
# raise an error for an unrecoverable error
for x in self.run_log[-1]["corrections"]:
if not x["actions"] and x["handler"].raises_runtime_error:
self.run_log[-1]["handler"] = x["handler"]
s = "Unrecoverable error for handler: {}. " \
"Raising RuntimeError".format(x["handler"])
raise NonRecoverableError(s, True, x["handler"])
logger.info("Corrected input based on error handlers")
# Return with more jobs to run if recoverable error caught
# and corrected for
return len(self.jobs) - job_n
# check validators
logger.info("Checking validator for {}.run".format(job.name))
for v in self.validators:
if v.check():
self.run_log[-1]["validator"] = v
logger.info("Failed validation based on validator")
s = "Validation failed: {}".format(v)
raise ValidationError(s, True, v)
logger.info("Postprocessing for {}.run".format(job.name))
job.postprocess()
# IF DONE WITH ALL JOBS - DELETE ALL CHECKPOINTS AND RETURN
# VALIDATED
if len(self.jobs) == (job_n + 1):
self.finished = True
return 0
# Setup next job_n
job_n += 1
job = self.jobs[job_n]
self.run_log.append({"job": job.as_dict(), "corrections": [],
'job_n': job_n})
job.setup()
return len(self.jobs) - job_n
except CustodianError as ex:
logger.error(ex.message)
if ex.raises:
raise
finally:
# Log the corrections to a json file.
logger.info("Logging to {}...".format(Custodian.LOG_FILE))
dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
indent=4)
end = datetime.datetime.now()
logger.info("Run ended at {}.".format(end))
run_time = end - start
logger.info("Run completed. Total time taken = {}."
.format(run_time))
if self.finished and self.gzipped_output:
gzip_dir(".") |
def compare(
dataset_dict,
ic="waic",
method="BB-pseudo-BMA",
b_samples=1000,
alpha=1,
seed=None,
scale="deviance",
):
r"""Compare models based on WAIC or LOO cross validation.
WAIC is Widely applicable information criterion, and LOO is leave-one-out
(LOO) cross-validation. Read more theory here - in a paper by some of the
leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dict[str] -> InferenceData
A dictionary of model names and InferenceData objects
ic : str
Information Criterion (WAIC or LOO) used to compare models. Default WAIC.
method : str
Method used to estimate the weights for each model. Available options are:
- 'stacking' : stacking of predictive distributions.
- 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type
weighting. The weights are stabilized using the Bayesian bootstrap
- 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type
weighting, without Bootstrap stabilization (not recommended)
For more information read https://arxiv.org/abs/1704.02030
b_samples: int
Number of samples taken by the Bayesian bootstrap estimation.
Only useful when method = 'BB-pseudo-BMA'.
alpha : float
The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform
on the simplex. A smaller alpha will keeps the final weights more away from 0 and 1.
seed : int or np.random.RandomState instance
If int or RandomState, use it for seeding Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. Default None the global
np.random state is used.
scale : str
Output scale for IC. Available options are:
- `deviance` : (default) -2 * (log-score)
- `log` : 1 * log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
Returns
-------
A DataFrame, ordered from lowest to highest IC. The index reflects the order in which the
models are passed to this function. The columns are:
IC : Information Criteria (WAIC or LOO).
Smaller IC indicates higher out-of-sample predictive fit ("better" model). Default WAIC.
If `scale == log` higher IC indicates higher out-of-sample predictive fit ("better" model).
pIC : Estimated effective number of parameters.
dIC : Relative difference between each IC (WAIC or LOO)
and the lowest IC (WAIC or LOO).
It's always 0 for the top-ranked model.
weight: Relative weight for each model.
This can be loosely interpreted as the probability of each model (among the compared model)
given the data. By default the uncertainty in the weights estimation is considered using
Bayesian bootstrap.
SE : Standard error of the IC estimate.
If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.
dSE : Standard error of the difference in IC between each model and
the top-ranked model.
It's always 0 for the top-ranked model.
warning : A value of 1 indicates that the computation of the IC may not be reliable. This could
be indication of WAIC/LOO starting to fail see http://arxiv.org/abs/1507.04544 for details.
scale : Scale used for the IC.
"""
names = list(dataset_dict.keys())
scale = scale.lower()
if scale == "log":
scale_value = 1
ascending = False
else:
if scale == "negative_log":
scale_value = -1
else:
scale_value = -2
ascending = True
if ic == "waic":
ic_func = waic
df_comp = pd.DataFrame(
index=names,
columns=["waic", "p_waic", "d_waic", "weight", "se", "dse", "warning", "waic_scale"],
)
scale_col = "waic_scale"
elif ic == "loo":
ic_func = loo
df_comp = pd.DataFrame(
index=names,
columns=["loo", "p_loo", "d_loo", "weight", "se", "dse", "warning", "loo_scale"],
)
scale_col = "loo_scale"
else:
raise NotImplementedError("The information criterion {} is not supported.".format(ic))
if method.lower() not in ["stacking", "bb-pseudo-bma", "pseudo-bma"]:
raise ValueError("The method {}, to compute weights, is not supported.".format(method))
ic_se = "{}_se".format(ic)
p_ic = "p_{}".format(ic)
ic_i = "{}_i".format(ic)
ics = pd.DataFrame()
names = []
for name, dataset in dataset_dict.items():
names.append(name)
ics = ics.append([ic_func(dataset, pointwise=True, scale=scale)])
ics.index = names
ics.sort_values(by=ic, inplace=True, ascending=ascending)
if method.lower() == "stacking":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
exp_ic_i = np.exp(ic_i_val / scale_value)
last_col = cols - 1
def w_fuller(weights):
return np.concatenate((weights, [max(1.0 - np.sum(weights), 0.0)]))
def log_score(weights):
w_full = w_fuller(weights)
score = 0.0
for i in range(rows):
score += np.log(np.dot(exp_ic_i[i], w_full))
return -score
def gradient(weights):
w_full = w_fuller(weights)
grad = np.zeros(last_col)
for k in range(last_col - 1):
for i in range(rows):
grad[k] += (exp_ic_i[i, k] - exp_ic_i[i, last_col]) / np.dot(
exp_ic_i[i], w_full
)
return -grad
theta = np.full(last_col, 1.0 / cols)
bounds = [(0.0, 1.0) for _ in range(last_col)]
constraints = [
{"type": "ineq", "fun": lambda x: 1.0 - np.sum(x)},
{"type": "ineq", "fun": np.sum},
]
weights = minimize(
fun=log_score, x0=theta, jac=gradient, bounds=bounds, constraints=constraints
)
weights = w_fuller(weights["x"])
ses = ics[ic_se]
elif method.lower() == "bb-pseudo-bma":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
ic_i_val = ic_i_val * rows
b_weighting = st.dirichlet.rvs(alpha=[alpha] * rows, size=b_samples, random_state=seed)
weights = np.zeros((b_samples, cols))
z_bs = np.zeros_like(weights)
for i in range(b_samples):
z_b = np.dot(b_weighting[i], ic_i_val)
u_weights = np.exp((z_b - np.min(z_b)) / scale_value)
z_bs[i] = z_b # pylint: disable=unsupported-assignment-operation
weights[i] = u_weights / np.sum(u_weights)
weights = weights.mean(axis=0)
ses = pd.Series(z_bs.std(axis=0), index=names) # pylint: disable=no-member
elif method.lower() == "pseudo-bma":
min_ic = ics.iloc[0][ic]
z_rv = np.exp((ics[ic] - min_ic) / scale_value)
weights = z_rv / np.sum(z_rv)
ses = ics[ic_se]
if np.any(weights):
min_ic_i_val = ics[ic_i].iloc[0]
for idx, val in enumerate(ics.index):
res = ics.loc[val]
if scale_value < 0:
diff = res[ic_i] - min_ic_i_val
else:
diff = min_ic_i_val - res[ic_i]
d_ic = np.sum(diff)
d_std_err = np.sqrt(len(diff) * np.var(diff))
std_err = ses.loc[val]
weight = weights[idx]
df_comp.at[val] = (
res[ic],
res[p_ic],
d_ic,
weight,
std_err,
d_std_err,
res["warning"],
res[scale_col],
)
return df_comp.sort_values(by=ic, ascending=ascending) | r"""Compare models based on WAIC or LOO cross validation.
WAIC is Widely applicable information criterion, and LOO is leave-one-out
(LOO) cross-validation. Read more theory here - in a paper by some of the
leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dict[str] -> InferenceData
A dictionary of model names and InferenceData objects
ic : str
Information Criterion (WAIC or LOO) used to compare models. Default WAIC.
method : str
Method used to estimate the weights for each model. Available options are:
- 'stacking' : stacking of predictive distributions.
- 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type
weighting. The weights are stabilized using the Bayesian bootstrap
- 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type
weighting, without Bootstrap stabilization (not recommended)
For more information read https://arxiv.org/abs/1704.02030
b_samples: int
Number of samples taken by the Bayesian bootstrap estimation.
Only useful when method = 'BB-pseudo-BMA'.
alpha : float
The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform
on the simplex. A smaller alpha will keeps the final weights more away from 0 and 1.
seed : int or np.random.RandomState instance
If int or RandomState, use it for seeding Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. Default None the global
np.random state is used.
scale : str
Output scale for IC. Available options are:
- `deviance` : (default) -2 * (log-score)
- `log` : 1 * log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
Returns
-------
A DataFrame, ordered from lowest to highest IC. The index reflects the order in which the
models are passed to this function. The columns are:
IC : Information Criteria (WAIC or LOO).
Smaller IC indicates higher out-of-sample predictive fit ("better" model). Default WAIC.
If `scale == log` higher IC indicates higher out-of-sample predictive fit ("better" model).
pIC : Estimated effective number of parameters.
dIC : Relative difference between each IC (WAIC or LOO)
and the lowest IC (WAIC or LOO).
It's always 0 for the top-ranked model.
weight: Relative weight for each model.
This can be loosely interpreted as the probability of each model (among the compared model)
given the data. By default the uncertainty in the weights estimation is considered using
Bayesian bootstrap.
SE : Standard error of the IC estimate.
If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.
dSE : Standard error of the difference in IC between each model and
the top-ranked model.
It's always 0 for the top-ranked model.
warning : A value of 1 indicates that the computation of the IC may not be reliable. This could
be indication of WAIC/LOO starting to fail see http://arxiv.org/abs/1507.04544 for details.
scale : Scale used for the IC. | Below is the the instruction that describes the task:
### Input:
r"""Compare models based on WAIC or LOO cross validation.
WAIC is Widely applicable information criterion, and LOO is leave-one-out
(LOO) cross-validation. Read more theory here - in a paper by some of the
leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dict[str] -> InferenceData
A dictionary of model names and InferenceData objects
ic : str
Information Criterion (WAIC or LOO) used to compare models. Default WAIC.
method : str
Method used to estimate the weights for each model. Available options are:
- 'stacking' : stacking of predictive distributions.
- 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type
weighting. The weights are stabilized using the Bayesian bootstrap
- 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type
weighting, without Bootstrap stabilization (not recommended)
For more information read https://arxiv.org/abs/1704.02030
b_samples: int
Number of samples taken by the Bayesian bootstrap estimation.
Only useful when method = 'BB-pseudo-BMA'.
alpha : float
The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform
on the simplex. A smaller alpha will keeps the final weights more away from 0 and 1.
seed : int or np.random.RandomState instance
If int or RandomState, use it for seeding Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. Default None the global
np.random state is used.
scale : str
Output scale for IC. Available options are:
- `deviance` : (default) -2 * (log-score)
- `log` : 1 * log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
Returns
-------
A DataFrame, ordered from lowest to highest IC. The index reflects the order in which the
models are passed to this function. The columns are:
IC : Information Criteria (WAIC or LOO).
Smaller IC indicates higher out-of-sample predictive fit ("better" model). Default WAIC.
If `scale == log` higher IC indicates higher out-of-sample predictive fit ("better" model).
pIC : Estimated effective number of parameters.
dIC : Relative difference between each IC (WAIC or LOO)
and the lowest IC (WAIC or LOO).
It's always 0 for the top-ranked model.
weight: Relative weight for each model.
This can be loosely interpreted as the probability of each model (among the compared model)
given the data. By default the uncertainty in the weights estimation is considered using
Bayesian bootstrap.
SE : Standard error of the IC estimate.
If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.
dSE : Standard error of the difference in IC between each model and
the top-ranked model.
It's always 0 for the top-ranked model.
warning : A value of 1 indicates that the computation of the IC may not be reliable. This could
be indication of WAIC/LOO starting to fail see http://arxiv.org/abs/1507.04544 for details.
scale : Scale used for the IC.
### Response:
def compare(
dataset_dict,
ic="waic",
method="BB-pseudo-BMA",
b_samples=1000,
alpha=1,
seed=None,
scale="deviance",
):
r"""Compare models based on WAIC or LOO cross validation.
WAIC is Widely applicable information criterion, and LOO is leave-one-out
(LOO) cross-validation. Read more theory here - in a paper by some of the
leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dict[str] -> InferenceData
A dictionary of model names and InferenceData objects
ic : str
Information Criterion (WAIC or LOO) used to compare models. Default WAIC.
method : str
Method used to estimate the weights for each model. Available options are:
- 'stacking' : stacking of predictive distributions.
- 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type
weighting. The weights are stabilized using the Bayesian bootstrap
- 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type
weighting, without Bootstrap stabilization (not recommended)
For more information read https://arxiv.org/abs/1704.02030
b_samples: int
Number of samples taken by the Bayesian bootstrap estimation.
Only useful when method = 'BB-pseudo-BMA'.
alpha : float
The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform
on the simplex. A smaller alpha will keeps the final weights more away from 0 and 1.
seed : int or np.random.RandomState instance
If int or RandomState, use it for seeding Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. Default None the global
np.random state is used.
scale : str
Output scale for IC. Available options are:
- `deviance` : (default) -2 * (log-score)
- `log` : 1 * log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
Returns
-------
A DataFrame, ordered from lowest to highest IC. The index reflects the order in which the
models are passed to this function. The columns are:
IC : Information Criteria (WAIC or LOO).
Smaller IC indicates higher out-of-sample predictive fit ("better" model). Default WAIC.
If `scale == log` higher IC indicates higher out-of-sample predictive fit ("better" model).
pIC : Estimated effective number of parameters.
dIC : Relative difference between each IC (WAIC or LOO)
and the lowest IC (WAIC or LOO).
It's always 0 for the top-ranked model.
weight: Relative weight for each model.
This can be loosely interpreted as the probability of each model (among the compared model)
given the data. By default the uncertainty in the weights estimation is considered using
Bayesian bootstrap.
SE : Standard error of the IC estimate.
If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.
dSE : Standard error of the difference in IC between each model and
the top-ranked model.
It's always 0 for the top-ranked model.
warning : A value of 1 indicates that the computation of the IC may not be reliable. This could
be indication of WAIC/LOO starting to fail see http://arxiv.org/abs/1507.04544 for details.
scale : Scale used for the IC.
"""
names = list(dataset_dict.keys())
scale = scale.lower()
if scale == "log":
scale_value = 1
ascending = False
else:
if scale == "negative_log":
scale_value = -1
else:
scale_value = -2
ascending = True
if ic == "waic":
ic_func = waic
df_comp = pd.DataFrame(
index=names,
columns=["waic", "p_waic", "d_waic", "weight", "se", "dse", "warning", "waic_scale"],
)
scale_col = "waic_scale"
elif ic == "loo":
ic_func = loo
df_comp = pd.DataFrame(
index=names,
columns=["loo", "p_loo", "d_loo", "weight", "se", "dse", "warning", "loo_scale"],
)
scale_col = "loo_scale"
else:
raise NotImplementedError("The information criterion {} is not supported.".format(ic))
if method.lower() not in ["stacking", "bb-pseudo-bma", "pseudo-bma"]:
raise ValueError("The method {}, to compute weights, is not supported.".format(method))
ic_se = "{}_se".format(ic)
p_ic = "p_{}".format(ic)
ic_i = "{}_i".format(ic)
ics = pd.DataFrame()
names = []
for name, dataset in dataset_dict.items():
names.append(name)
ics = ics.append([ic_func(dataset, pointwise=True, scale=scale)])
ics.index = names
ics.sort_values(by=ic, inplace=True, ascending=ascending)
if method.lower() == "stacking":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
exp_ic_i = np.exp(ic_i_val / scale_value)
last_col = cols - 1
def w_fuller(weights):
return np.concatenate((weights, [max(1.0 - np.sum(weights), 0.0)]))
def log_score(weights):
w_full = w_fuller(weights)
score = 0.0
for i in range(rows):
score += np.log(np.dot(exp_ic_i[i], w_full))
return -score
def gradient(weights):
w_full = w_fuller(weights)
grad = np.zeros(last_col)
for k in range(last_col - 1):
for i in range(rows):
grad[k] += (exp_ic_i[i, k] - exp_ic_i[i, last_col]) / np.dot(
exp_ic_i[i], w_full
)
return -grad
theta = np.full(last_col, 1.0 / cols)
bounds = [(0.0, 1.0) for _ in range(last_col)]
constraints = [
{"type": "ineq", "fun": lambda x: 1.0 - np.sum(x)},
{"type": "ineq", "fun": np.sum},
]
weights = minimize(
fun=log_score, x0=theta, jac=gradient, bounds=bounds, constraints=constraints
)
weights = w_fuller(weights["x"])
ses = ics[ic_se]
elif method.lower() == "bb-pseudo-bma":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
ic_i_val = ic_i_val * rows
b_weighting = st.dirichlet.rvs(alpha=[alpha] * rows, size=b_samples, random_state=seed)
weights = np.zeros((b_samples, cols))
z_bs = np.zeros_like(weights)
for i in range(b_samples):
z_b = np.dot(b_weighting[i], ic_i_val)
u_weights = np.exp((z_b - np.min(z_b)) / scale_value)
z_bs[i] = z_b # pylint: disable=unsupported-assignment-operation
weights[i] = u_weights / np.sum(u_weights)
weights = weights.mean(axis=0)
ses = pd.Series(z_bs.std(axis=0), index=names) # pylint: disable=no-member
elif method.lower() == "pseudo-bma":
min_ic = ics.iloc[0][ic]
z_rv = np.exp((ics[ic] - min_ic) / scale_value)
weights = z_rv / np.sum(z_rv)
ses = ics[ic_se]
if np.any(weights):
min_ic_i_val = ics[ic_i].iloc[0]
for idx, val in enumerate(ics.index):
res = ics.loc[val]
if scale_value < 0:
diff = res[ic_i] - min_ic_i_val
else:
diff = min_ic_i_val - res[ic_i]
d_ic = np.sum(diff)
d_std_err = np.sqrt(len(diff) * np.var(diff))
std_err = ses.loc[val]
weight = weights[idx]
df_comp.at[val] = (
res[ic],
res[p_ic],
d_ic,
weight,
std_err,
d_std_err,
res["warning"],
res[scale_col],
)
return df_comp.sort_values(by=ic, ascending=ascending) |
def create_from_pytz(cls, tz_info):
"""Create an instance using the result of the timezone() call in
"pytz".
"""
zone_name = tz_info.zone
utc_transition_times_list_raw = getattr(tz_info,
'_utc_transition_times',
None)
utc_transition_times_list = [tuple(utt.timetuple())
for utt
in utc_transition_times_list_raw] \
if utc_transition_times_list_raw is not None \
else None
transition_info_list_raw = getattr(tz_info,
'_transition_info',
None)
transition_info_list = [(utcoffset_td.total_seconds(),
dst_td.total_seconds(),
tzname)
for (utcoffset_td, dst_td, tzname)
in transition_info_list_raw] \
if transition_info_list_raw is not None \
else None
try:
utcoffset_dt = tz_info._utcoffset
except AttributeError:
utcoffset = None
else:
utcoffset = utcoffset_dt.total_seconds()
tzname = getattr(tz_info, '_tzname', None)
parent_class_name = getmro(tz_info.__class__)[1].__name__
return cls(zone_name, parent_class_name, utc_transition_times_list,
transition_info_list, utcoffset, tzname) | Create an instance using the result of the timezone() call in
"pytz". | Below is the the instruction that describes the task:
### Input:
Create an instance using the result of the timezone() call in
"pytz".
### Response:
def create_from_pytz(cls, tz_info):
"""Create an instance using the result of the timezone() call in
"pytz".
"""
zone_name = tz_info.zone
utc_transition_times_list_raw = getattr(tz_info,
'_utc_transition_times',
None)
utc_transition_times_list = [tuple(utt.timetuple())
for utt
in utc_transition_times_list_raw] \
if utc_transition_times_list_raw is not None \
else None
transition_info_list_raw = getattr(tz_info,
'_transition_info',
None)
transition_info_list = [(utcoffset_td.total_seconds(),
dst_td.total_seconds(),
tzname)
for (utcoffset_td, dst_td, tzname)
in transition_info_list_raw] \
if transition_info_list_raw is not None \
else None
try:
utcoffset_dt = tz_info._utcoffset
except AttributeError:
utcoffset = None
else:
utcoffset = utcoffset_dt.total_seconds()
tzname = getattr(tz_info, '_tzname', None)
parent_class_name = getmro(tz_info.__class__)[1].__name__
return cls(zone_name, parent_class_name, utc_transition_times_list,
transition_info_list, utcoffset, tzname) |
def get_template(filename_or_string, is_string=False):
'''
Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string.
'''
# Cache against string sha or just the filename
cache_key = sha1_hash(filename_or_string) if is_string else filename_or_string
if cache_key in TEMPLATES:
return TEMPLATES[cache_key]
if is_string:
# Set the input string as our template
template_string = filename_or_string
else:
# Load template data into memory
with open(filename_or_string, 'r') as file_io:
template_string = file_io.read()
TEMPLATES[cache_key] = Template(template_string, keep_trailing_newline=True)
return TEMPLATES[cache_key] | Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string. | Below is the the instruction that describes the task:
### Input:
Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string.
### Response:
def get_template(filename_or_string, is_string=False):
'''
Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string.
'''
# Cache against string sha or just the filename
cache_key = sha1_hash(filename_or_string) if is_string else filename_or_string
if cache_key in TEMPLATES:
return TEMPLATES[cache_key]
if is_string:
# Set the input string as our template
template_string = filename_or_string
else:
# Load template data into memory
with open(filename_or_string, 'r') as file_io:
template_string = file_io.read()
TEMPLATES[cache_key] = Template(template_string, keep_trailing_newline=True)
return TEMPLATES[cache_key] |
def get_users_of_account_group(self, account_id, group_id, **kwargs): # noqa: E501
"""Get users of a group. # noqa: E501
An endpoint for listing users of the group with details. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_users_of_account_group(account_id, group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str group_id: The ID of the group whose users are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str status__eq: An optional filter for getting users by status.
:param str status__in: An optional filter for getting users with a specified set of statuses.
:param str status__nin: An optional filter for excluding users with a specified set of statuses.
:return: UserInfoRespList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_users_of_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501
else:
(data) = self.get_users_of_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501
return data | Get users of a group. # noqa: E501
An endpoint for listing users of the group with details. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_users_of_account_group(account_id, group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str group_id: The ID of the group whose users are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str status__eq: An optional filter for getting users by status.
:param str status__in: An optional filter for getting users with a specified set of statuses.
:param str status__nin: An optional filter for excluding users with a specified set of statuses.
:return: UserInfoRespList
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Get users of a group. # noqa: E501
An endpoint for listing users of the group with details. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_users_of_account_group(account_id, group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str group_id: The ID of the group whose users are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str status__eq: An optional filter for getting users by status.
:param str status__in: An optional filter for getting users with a specified set of statuses.
:param str status__nin: An optional filter for excluding users with a specified set of statuses.
:return: UserInfoRespList
If the method is called asynchronously,
returns the request thread.
### Response:
def get_users_of_account_group(self, account_id, group_id, **kwargs): # noqa: E501
"""Get users of a group. # noqa: E501
An endpoint for listing users of the group with details. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_users_of_account_group(account_id, group_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str group_id: The ID of the group whose users are retrieved. (required)
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str status__eq: An optional filter for getting users by status.
:param str status__in: An optional filter for getting users with a specified set of statuses.
:param str status__nin: An optional filter for excluding users with a specified set of statuses.
:return: UserInfoRespList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_users_of_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501
else:
(data) = self.get_users_of_account_group_with_http_info(account_id, group_id, **kwargs) # noqa: E501
return data |
def get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, **kwargs):
"""
Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
return self.service.get_neuroglancer_link(resource, resolution, x_range, y_range, z_range, self.url_prefix, **kwargs) | Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation. | Below is the the instruction that describes the task:
### Input:
Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
### Response:
def get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, **kwargs):
"""
Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
return self.service.get_neuroglancer_link(resource, resolution, x_range, y_range, z_range, self.url_prefix, **kwargs) |
def ParseArgs(self, args):
"""Parse and validate the args.
Note we pop all the args we consume here - so if there are any args we dont
know about, args will not be an empty dict after this. This allows the same
args to be parsed by several TypeDescriptorSets.
Args:
args: A dictionary of arguments that this TypeDescriptorSet might use. If
this dict does not have a required parameter, we still yield its default
value.
Yields:
A (name, value) tuple of the parsed args.
"""
for descriptor in self:
# Get the value from the kwargs or, if not specified, the default.
value = args.pop(descriptor.name, None)
if value is None:
# No need to validate the default value.
value = descriptor.default
else:
try:
# Validate this value - this should raise if the value provided is not
# acceptable to the type descriptor.
value = descriptor.Validate(value)
except Exception:
logging.error("Invalid value %s for arg %s", value, descriptor.name)
raise
yield descriptor.name, value | Parse and validate the args.
Note we pop all the args we consume here - so if there are any args we dont
know about, args will not be an empty dict after this. This allows the same
args to be parsed by several TypeDescriptorSets.
Args:
args: A dictionary of arguments that this TypeDescriptorSet might use. If
this dict does not have a required parameter, we still yield its default
value.
Yields:
A (name, value) tuple of the parsed args. | Below is the the instruction that describes the task:
### Input:
Parse and validate the args.
Note we pop all the args we consume here - so if there are any args we dont
know about, args will not be an empty dict after this. This allows the same
args to be parsed by several TypeDescriptorSets.
Args:
args: A dictionary of arguments that this TypeDescriptorSet might use. If
this dict does not have a required parameter, we still yield its default
value.
Yields:
A (name, value) tuple of the parsed args.
### Response:
def ParseArgs(self, args):
"""Parse and validate the args.
Note we pop all the args we consume here - so if there are any args we dont
know about, args will not be an empty dict after this. This allows the same
args to be parsed by several TypeDescriptorSets.
Args:
args: A dictionary of arguments that this TypeDescriptorSet might use. If
this dict does not have a required parameter, we still yield its default
value.
Yields:
A (name, value) tuple of the parsed args.
"""
for descriptor in self:
# Get the value from the kwargs or, if not specified, the default.
value = args.pop(descriptor.name, None)
if value is None:
# No need to validate the default value.
value = descriptor.default
else:
try:
# Validate this value - this should raise if the value provided is not
# acceptable to the type descriptor.
value = descriptor.Validate(value)
except Exception:
logging.error("Invalid value %s for arg %s", value, descriptor.name)
raise
yield descriptor.name, value |
def xrange(self, stream, start='-', stop='+', count=None):
"""Retrieve messages from a stream."""
if count is not None:
extra = ['COUNT', count]
else:
extra = []
fut = self.execute(b'XRANGE', stream, start, stop, *extra)
return wait_convert(fut, parse_messages) | Retrieve messages from a stream. | Below is the the instruction that describes the task:
### Input:
Retrieve messages from a stream.
### Response:
def xrange(self, stream, start='-', stop='+', count=None):
"""Retrieve messages from a stream."""
if count is not None:
extra = ['COUNT', count]
else:
extra = []
fut = self.execute(b'XRANGE', stream, start, stop, *extra)
return wait_convert(fut, parse_messages) |
def spellchecker(word):
"""
Looks for possible typos, i.e., deletion, insertion, transposition and
alteration. If the target is 'audreyr': deletion is 'adreyr', insertion is
'audreeyr', transposition is 'aurdeyr' and alteration is 'audriyr'.
Returns a list of possible words sorted by matching the same length.
"""
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
replaces = [a + c + b[1:] for a, b in splits for c in ALPHABET if b]
inserts = [a + c + b for a, b in splits for c in ALPHABET]
guesses = set(deletes + transposes + replaces + inserts)
sorted_guesses = sorted(guesses, key=lambda w: len(w) == len(word),
reverse=True)
return sorted_guesses | Looks for possible typos, i.e., deletion, insertion, transposition and
alteration. If the target is 'audreyr': deletion is 'adreyr', insertion is
'audreeyr', transposition is 'aurdeyr' and alteration is 'audriyr'.
Returns a list of possible words sorted by matching the same length. | Below is the the instruction that describes the task:
### Input:
Looks for possible typos, i.e., deletion, insertion, transposition and
alteration. If the target is 'audreyr': deletion is 'adreyr', insertion is
'audreeyr', transposition is 'aurdeyr' and alteration is 'audriyr'.
Returns a list of possible words sorted by matching the same length.
### Response:
def spellchecker(word):
"""
Looks for possible typos, i.e., deletion, insertion, transposition and
alteration. If the target is 'audreyr': deletion is 'adreyr', insertion is
'audreeyr', transposition is 'aurdeyr' and alteration is 'audriyr'.
Returns a list of possible words sorted by matching the same length.
"""
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
replaces = [a + c + b[1:] for a, b in splits for c in ALPHABET if b]
inserts = [a + c + b for a, b in splits for c in ALPHABET]
guesses = set(deletes + transposes + replaces + inserts)
sorted_guesses = sorted(guesses, key=lambda w: len(w) == len(word),
reverse=True)
return sorted_guesses |
def get_memory_usage(user=None):
    """
    Collect memory statistics, optionally restricted to a single user.

    Returns a three-tuple::
        (total memory, largest process' memory, largest process name)
    :param user: String naming the user whose processes are summed. With
        `None`, processes of every user are counted.
    """
    total_bytes = 0
    biggest_rss = 0
    biggest_name = None
    for proc in psutil.process_iter():
        proc_user = proc.username()
        if user is not None and proc_user != user:
            continue
        try:
            # memory_info()[0] is the resident set size (RSS).
            rss = proc.memory_info()[0]
        except psutil.AccessDenied:
            # Skip processes we may not inspect (e.g. owned by root).
            continue
        total_bytes += rss
        if rss > biggest_rss:
            biggest_rss = rss
            biggest_name = proc.name()
    return total_bytes, biggest_rss, biggest_name
The result contains::
(total memory, largest process' memory, largest process name)
:param user: String representing the user. If `None`, the total size of
all processes for all users will be returned. | Below is the the instruction that describes the task:
### Input:
Returns a three-tupel with memory usage for the given user.
The result contains::
(total memory, largest process' memory, largest process name)
:param user: String representing the user. If `None`, the total size of
all processes for all users will be returned.
### Response:
def get_memory_usage(user=None):
"""
Returns a three-tupel with memory usage for the given user.
The result contains::
(total memory, largest process' memory, largest process name)
:param user: String representing the user. If `None`, the total size of
all processes for all users will be returned.
"""
total = 0
largest_process = 0
largest_process_name = None
for p in psutil.process_iter():
p_user = p.username()
if user is None or p_user == user:
try:
process_memory = p.memory_info()[0]
except psutil.AccessDenied:
continue
total += process_memory
if process_memory > largest_process:
largest_process = process_memory
largest_process_name = p.name()
return total, largest_process, largest_process_name |
def add_prefix(self, namespace, prefix):
    """Add a new namespace prefix.
    If the root element has not yet been emitted the prefix will
    be declared there, otherwise the prefix will be declared on the
    top-most element using this namespace in every stanza.
    :Parameters:
        - `namespace`: the namespace URI
        - `prefix`: the prefix string
    :Types:
        - `namespace`: `unicode`
        - `prefix`: `unicode`
    :raise ValueError: when attempting to rebind the reserved 'xml'
        prefix to anything other than the XML namespace.
    """
    # 'xml' is permanently bound to the XML namespace by the Namespaces
    # in XML spec and must never be redefined.
    if prefix == "xml" and namespace != XML_NS:
        # BUGFIX: use call-style raise; the old `raise ValueError, msg`
        # statement form is a SyntaxError under Python 3.
        raise ValueError("Cannot change 'xml' prefix meaning")
    self._prefixes[namespace] = prefix
If the root element has not yet been emitted the prefix will
be declared there, otherwise the prefix will be declared on the
top-most element using this namespace in every stanza.
:Parameters:
- `namespace`: the namespace URI
- `prefix`: the prefix string
:Types:
- `namespace`: `unicode`
- `prefix`: `unicode` | Below is the the instruction that describes the task:
### Input:
Add a new namespace prefix.
If the root element has not yet been emitted the prefix will
be declared there, otherwise the prefix will be declared on the
top-most element using this namespace in every stanza.
:Parameters:
- `namespace`: the namespace URI
- `prefix`: the prefix string
:Types:
- `namespace`: `unicode`
- `prefix`: `unicode`
### Response:
def add_prefix(self, namespace, prefix):
"""Add a new namespace prefix.
If the root element has not yet been emitted the prefix will
be declared there, otherwise the prefix will be declared on the
top-most element using this namespace in every stanza.
:Parameters:
- `namespace`: the namespace URI
- `prefix`: the prefix string
:Types:
- `namespace`: `unicode`
- `prefix`: `unicode`
"""
if prefix == "xml" and namespace != XML_NS:
raise ValueError, "Cannot change 'xml' prefix meaning"
self._prefixes[namespace] = prefix |
def delete_hosted_zone_by_domain(Name, PrivateZone=None, region=None, key=None, keyid=None,
                                 profile=None):
    '''
    Delete a Route53 hosted zone by domain name, and PrivateZone status if provided.
    CLI Example::
        salt myminion boto3_route53.delete_hosted_zone_by_domain example.org.
    '''
    lookup = {'Name': Name, 'PrivateZone': PrivateZone,
              'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    # Deliberately strict for safety: when PrivateZone is unset and the name
    # resolves to both a public and a private zone, find_hosted_zone() fails
    # rather than guessing, forcing the caller to be explicit.
    matches = find_hosted_zone(**lookup)
    if not matches:
        log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
        return False
    zone_id = matches[0]['HostedZone']['Id']
    return delete_hosted_zone(Id=zone_id, region=region, key=key, keyid=keyid, profile=profile)
CLI Example::
salt myminion boto3_route53.delete_hosted_zone_by_domain example.org. | Below is the the instruction that describes the task:
### Input:
Delete a Route53 hosted zone by domain name, and PrivateZone status if provided.
CLI Example::
salt myminion boto3_route53.delete_hosted_zone_by_domain example.org.
### Response:
def delete_hosted_zone_by_domain(Name, PrivateZone=None, region=None, key=None, keyid=None,
profile=None):
'''
Delete a Route53 hosted zone by domain name, and PrivateZone status if provided.
CLI Example::
salt myminion boto3_route53.delete_hosted_zone_by_domain example.org.
'''
args = {'Name': Name, 'PrivateZone': PrivateZone,
'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
# Be extra pedantic in the service of safety - if public/private is not provided and the domain
# name resolves to both, fail and require them to declare it explicitly.
zone = find_hosted_zone(**args)
if not zone:
log.error("Couldn't resolve domain name %s to a hosted zone ID.", Name)
return False
Id = zone[0]['HostedZone']['Id']
return delete_hosted_zone(Id=Id, region=region, key=key, keyid=keyid, profile=profile) |
def is_valid_coordinate(img, i, j, k):
    """Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries.
    Parameters
    ----------
    img:
        Image whose shape defines the valid voxel grid.
    i, j, k: int
        Voxel coordinate along each of the three image axes.
    Returns
    -------
    bool
    """
    dim_x, dim_y, dim_z = get_shape(img)
    # Chained comparisons express the half-open bounds [0, dim) per axis.
    return (0 <= i < dim_x) and (0 <= j < dim_y) and (0 <= k < dim_z)
Parameters
----------
@param img:
@param i:
@param j:
@param k:
Returns
-------
bool | Below is the the instruction that describes the task:
### Input:
Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries.
Parameters
----------
@param img:
@param i:
@param j:
@param k:
Returns
-------
bool
### Response:
def is_valid_coordinate(img, i, j, k):
"""Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries.
Parameters
----------
@param img:
@param i:
@param j:
@param k:
Returns
-------
bool
"""
imgx, imgy, imgz = get_shape(img)
return (i >= 0 and i < imgx) and \
(j >= 0 and j < imgy) and \
(k >= 0 and k < imgz) |
def populate_fw_dev(self, fw_id, mgmt_ip, new):
    """Populate the class after a restart.

    Scans the resource table for an entry whose management IP matches
    ``mgmt_ip``. On a match, when ``new`` is truthy the entry's usage count
    is incremented and ``fw_id`` recorded; the entry's object dict and
    management IP are returned. Returns ``(None, None)`` when nothing
    matches.
    """
    for entry in self.res.values():
        if entry.get('mgmt_ip') != mgmt_ip:
            continue
        if new:
            entry['used'] = entry.get('used') + 1
            entry['fw_id_lst'].append(fw_id)
        return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
### Input:
Populate the class after a restart.
### Response:
def populate_fw_dev(self, fw_id, mgmt_ip, new):
"""Populate the class after a restart. """
for cnt in self.res:
used = self.res.get(cnt).get('used')
if mgmt_ip == self.res[cnt].get('mgmt_ip'):
if new:
self.res[cnt]['used'] = used + 1
self.res[cnt]['fw_id_lst'].append(fw_id)
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None |
def jacobian(f, v, v_mapping):
    """
    f: single fluxion object or an array or list of fluxions, representing a scalar or vector function
    v: vector of variables in f with respect to which the Jacobian will be calculated
    v_mapping: dict mapping variables in f to scalar or vector of values
    """
    # Normalize f into a 1-D numpy array of fluxions.
    funcs = np.asarray([f] if isinstance(f, Fluxion) else f)
    variables = np.asarray(v)
    values = _check_input_vals(v_mapping)
    n_vars = len(variables)
    n_funcs = len(funcs)
    n_samples = len(list(values.values())[0])  # number of values per variable
    jac = np.zeros((n_vars, n_funcs, n_samples))
    for idx, func in enumerate(funcs):
        # Seed every variable with 1 so diff() yields all partials at once.
        seed = dict.fromkeys(variables, 1)
        jac[:, idx, :] = func.diff(values, seed).T
    return jac.squeeze()
v: vector of variables in f with respect to which the Jacobian will be calculated
v_mapping: dict mapping variables in f to scalar or vector of values | Below is the the instruction that describes the task:
### Input:
f: single fluxion object or an array or list of fluxions, representing a scalar or vector function
v: vector of variables in f with respect to which the Jacobian will be calculated
v_mapping: dict mapping variables in f to scalar or vector of values
### Response:
def jacobian(f, v, v_mapping):
"""
f: single fluxion object or an array or list of fluxions, representing a scalar or vector function
v: vector of variables in f with respect to which the Jacobian will be calculated
v_mapping: dict mapping variables in f to scalar or vector of values
"""
# make sure f is in the form np.array([fl1, ...])
if isinstance(f, Fluxion):
f = [f]
f = np.asarray(f)
v = np.asarray(v)
v_mapping = _check_input_vals(v_mapping)
m = len(v)
n = len(f)
T = len(list(v_mapping.values())[0]) # number of values per variable
J = np.zeros((m,n,T))
for i, f_i in enumerate(f):
seed = dict.fromkeys(v, 1)
dfi_dvj = f_i.diff(v_mapping, seed)
J[:,i,:] = dfi_dvj.T
return J.squeeze() |
def users_changed_handler(stream):
    """
    Sends connected client list of currently active users in the chatroom
    """
    while True:
        # Block until something signals that the user set has changed.
        yield from stream.get()
        # Snapshot the currently connected users.
        active = [
            {'username': name, 'uuid': uid}
            for name, uid in ws_connections.values()
        ]
        # Broadcast the refreshed roster, ordered by username.
        packet = {
            'type': 'users-changed',
            'value': sorted(active, key=lambda entry: entry['username'])
        }
        logger.debug(packet)
        yield from fanout_message(ws_connections.keys(), packet)
### Input:
Sends connected client list of currently active users in the chatroom
### Response:
def users_changed_handler(stream):
"""
Sends connected client list of currently active users in the chatroom
"""
while True:
yield from stream.get()
# Get list list of current active users
users = [
{'username': username, 'uuid': uuid_str}
for username, uuid_str in ws_connections.values()
]
# Make packet with list of new users (sorted by username)
packet = {
'type': 'users-changed',
'value': sorted(users, key=lambda i: i['username'])
}
logger.debug(packet)
yield from fanout_message(ws_connections.keys(), packet) |
def plot_ortho_stack(images, overlays=None, reorient=True,
                     # xyz arguments
                     xyz=None, xyz_lines=False, xyz_color='red', xyz_alpha=0.6, xyz_linewidth=2, xyz_pad=5,
                     # base image arguments
                     cmap='Greys_r', alpha=1,
                     # overlay arguments
                     overlay_cmap='jet', overlay_alpha=0.9,
                     # background arguments
                     black_bg=True, bg_thresh_quant=0.01, bg_val_quant=0.99,
                     # scale/crop/domain arguments
                     crop=False, scale=False, domain_image_map=None,
                     # title arguments
                     title=None, titlefontsize=24, title_dx=0, title_dy=0,
                     # 4th panel text arguments
                     text=None, textfontsize=24, textfontcolor='white', text_dx=0, text_dy=0,
                     # save & size arguments
                     filename=None, dpi=500, figsize=1., colpad=0, rowpad=0,
                     transpose=False, transparent=True):
    """
    Plot a stack of 3D images as orthographic slice triplets -- one row per
    image (one column per image when ``transpose=True``), with one panel per
    anatomical plane.

    Arguments
    ---------
    images : list of ANTsImage or str
        3D images (or file paths) to plot. Images whose physical space
        differs from the first image are resampled into it.
    overlays : list of ANTsImage or str, optional
        Optional overlay per image; each is resampled onto its base image.
        Zero-valued overlay voxels are rendered transparent.
    reorient : bool or str
        Orientation code to reorient every image to ('RPI' when True).
    xyz : sequence of 3 ints, optional
        Voxel coordinate where the three orthogonal slices intersect;
        defaults to the volume center.
    xyz_lines, xyz_color, xyz_alpha, xyz_linewidth, xyz_pad
        Crosshair-line appearance options.
    cmap, alpha / overlay_cmap, overlay_alpha
        Colormap and opacity for base images / overlays.
    crop : bool
        Crop each image to its foreground mask before plotting.
    scale : bool or (low, high)
        If truthy, window each image's intensities to the given quantiles
        ((0.05, 0.95) when ``scale is True``).
    domain_image_map : ANTsImage or (ANTsImage, transforms)
        Optional reference space (plus transform list) to map images into.
    title, titlefontsize, title_dx, title_dy
        Figure title text, size, and placement offsets.
    filename, dpi, transparent
        When ``filename`` is given the figure is saved instead of shown.
    figsize, colpad, rowpad, transpose
        Figure sizing and grid-layout options.

    NOTE(review): ``black_bg``, ``bg_thresh_quant``, ``bg_val_quant`` and the
    ``text*`` arguments are accepted for API compatibility but are not used
    by this implementation.

    Example
    -------
    >>> import ants
    >>> mni = ants.image_read(ants.get_data('mni'))
    >>> ch2 = ants.image_read(ants.get_data('ch2'))
    >>> ants.plot_ortho_stack([mni,mni,mni])
    """
    def mirror_matrix(x):
        return x[::-1,:]
    def rotate270_matrix(x):
        return mirror_matrix(x.T)
    def reorient_slice(x, axis):
        # Rotate each extracted 2D slice into display orientation.
        return rotate270_matrix(x)
    # need this hack because of a weird NaN warning from matplotlib with overlays
    warnings.simplefilter('ignore')
    n_images = len(images)
    # handle `image` argument: accept paths, require 3D ANTsImages
    for i in range(n_images):
        if isinstance(images[i], str):
            images[i] = iio2.image_read(images[i])
        if not isinstance(images[i], iio.ANTsImage):
            raise ValueError('image argument must be an ANTsImage')
        if images[i].dimension != 3:
            raise ValueError('Input image must have 3 dimensions!')
    if overlays is None:
        overlays = [None]*n_images
    # handle `overlay` argument: accept paths, resample onto base images
    for i in range(n_images):
        if overlays[i] is not None:
            if isinstance(overlays[i], str):
                overlays[i] = iio2.image_read(overlays[i])
            if not isinstance(overlays[i], iio.ANTsImage):
                raise ValueError('overlay argument must be an ANTsImage')
            if overlays[i].dimension != 3:
                raise ValueError('Overlay image must have 3 dimensions!')
            if not iio.image_physical_space_consistency(images[i], overlays[i]):
                overlays[i] = reg.resample_image_to_target(overlays[i], images[i], interp_type='linear')
    for i in range(1,n_images):
        if not iio.image_physical_space_consistency(images[0], images[i]):
            # BUGFIX: resample image i into the space of the first image.
            # The arguments were previously swapped, which replaced image i
            # with a resampled copy of images[0] and discarded its data.
            images[i] = reg.resample_image_to_target(images[i], images[0], interp_type='linear')
    # reorient images
    if reorient != False:
        if reorient == True:
            reorient = 'RPI'
        for i in range(n_images):
            images[i] = images[i].reorient_image2(reorient)
            if overlays[i] is not None:
                overlays[i] = overlays[i].reorient_image2(reorient)
    # handle `slices` argument: fill any missing coordinate with the center
    if xyz is None:
        xyz = [int(s/2) for s in images[0].shape]
    for i in range(3):
        if xyz[i] is None:
            xyz[i] = int(images[0].shape[i]/2)
    # resample image if spacing is very unbalanced
    spacing = list(images[0].spacing)
    if (max(spacing) / min(spacing)) > 3.:
        new_spacing = (1,1,1)
        for i in range(n_images):
            images[i] = images[i].resample_image(tuple(new_spacing))
            if overlays[i] is not None:
                overlays[i] = overlays[i].resample_image(tuple(new_spacing))
        # keep the slice coordinate at the same physical location
        xyz = [int(sl*(sold/snew)) for sl,sold,snew in zip(xyz,spacing,new_spacing)]
    # potentially crop image
    if crop:
        for i in range(n_images):
            plotmask = images[i].get_mask(cleanup=0)
            if plotmask.max() == 0:
                plotmask += 1
            images[i] = images[i].crop_image(plotmask)
            if overlays[i] is not None:
                overlays[i] = overlays[i].crop_image(plotmask)
    # pad images; shift xyz by the first image's lower padding
    for i in range(n_images):
        if i == 0:
            images[i], lowpad, uppad = images[i].pad_image(return_padvals=True)
        else:
            images[i] = images[i].pad_image()
        if overlays[i] is not None:
            overlays[i] = overlays[i].pad_image()
    xyz = [v+l for v,l in zip(xyz,lowpad)]
    # handle `domain_image_map` argument
    if domain_image_map is not None:
        if isinstance(domain_image_map, iio.ANTsImage):
            tx = tio2.new_ants_transform(precision='float', transform_type='AffineTransform',
                                         dimension=3)
            for i in range(n_images):
                images[i] = tio.apply_ants_transform_to_image(tx, images[i], domain_image_map)
                if overlays[i] is not None:
                    overlays[i] = tio.apply_ants_transform_to_image(tx, overlays[i],
                                                                    domain_image_map,
                                                                    interpolation='linear')
        elif isinstance(domain_image_map, (list, tuple)):
            # expect an image and transformation
            if len(domain_image_map) != 2:
                raise ValueError('domain_image_map list or tuple must have length == 2')
            dimg = domain_image_map[0]
            if not isinstance(dimg, iio.ANTsImage):
                raise ValueError('domain_image_map first entry should be ANTsImage')
            tx = domain_image_map[1]
            for i in range(n_images):
                images[i] = reg.apply_transforms(dimg, images[i], transform_list=tx)
                if overlays[i] is not None:
                    overlays[i] = reg.apply_transforms(dimg, overlays[i], transform_list=tx,
                                                       interpolator='linear')
    # potentially find the per-image dynamic range.
    # BUGFIX: the previous implementation filled `vmins`/`vmaxs` but then
    # plotted with undefined names `vmin`/`vmax`, raising NameError whenever
    # `scale` was truthy -- and the computed windows were never applied.
    if scale == True:
        quantiles = (0.05, 0.95)
    elif isinstance(scale, (list,tuple)):
        if len(scale) != 2:
            raise ValueError('scale argument must be boolean or list/tuple with two values')
        quantiles = scale
    else:
        quantiles = None
    vmins = []
    vmaxs = []
    for i in range(n_images):
        if quantiles is not None:
            vmin, vmax = images[i].quantile(quantiles)
        else:
            vmin, vmax = None, None
        vmins.append(vmin)
        vmaxs.append(vmax)
    if not transpose:
        nrow = n_images
        ncol = 3
    else:
        nrow = 3
        ncol = n_images
    fig = plt.figure(figsize=((ncol+1)*2.5*figsize, (nrow+1)*2.5*figsize))
    if title is not None:
        basey = 0.93
        basex = 0.5
        fig.suptitle(title, fontsize=titlefontsize, x=basex+title_dx, y=basey+title_dy)
    if (colpad > 0) and (rowpad > 0):
        bothgridpad = max(colpad, rowpad)
        colpad = 0
        rowpad = 0
    else:
        bothgridpad = 0.0
    gs = gridspec.GridSpec(nrow, ncol, wspace=bothgridpad, hspace=0.0,
                           top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1) + colpad,
                           left=0.5/(ncol+1) + rowpad, right=1-0.5/(ncol+1))
    # convert to numpy; hide zero-valued overlay voxels via NaN
    for i in range(n_images):
        images[i] = images[i].numpy()
        if overlays[i] is not None:
            overlays[i] = overlays[i].numpy()
            overlays[i][np.abs(overlays[i]) == 0] = np.nan
    ####################
    ####################
    for i in range(n_images):
        # first panel: slice at fixed x
        yz_slice = reorient_slice(images[i][xyz[0],:,:],0)
        if not transpose:
            ax = plt.subplot(gs[i,0])
        else:
            ax = plt.subplot(gs[0,i])
        # BUGFIX: pass the per-image window and the `alpha` argument
        # (previously accepted but never forwarded to imshow).
        ax.imshow(yz_slice, cmap=cmap, alpha=alpha, vmin=vmins[i], vmax=vmaxs[i])
        if overlays[i] is not None:
            yz_overlay = reorient_slice(overlays[i][xyz[0],:,:],0)
            ax.imshow(yz_overlay, alpha=overlay_alpha, cmap=overlay_cmap)
        if xyz_lines:
            # add crosshair lines marking the other two slice planes
            l = mlines.Line2D([yz_slice.shape[0]-xyz[1],yz_slice.shape[0]-xyz[1]],
                              [xyz_pad,yz_slice.shape[0]-xyz_pad],
                              color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
            ax.add_line(l)
            l = mlines.Line2D([xyz_pad,yz_slice.shape[1]-xyz_pad],
                              [yz_slice.shape[1]-xyz[2],yz_slice.shape[1]-xyz[2]],
                              color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
            ax.add_line(l)
        ax.axis('off')
        ####################
        ####################
        # second panel: slice at fixed y
        xz_slice = reorient_slice(images[i][:,xyz[1],:],1)
        if not transpose:
            ax = plt.subplot(gs[i,1])
        else:
            ax = plt.subplot(gs[1,i])
        ax.imshow(xz_slice, cmap=cmap, alpha=alpha, vmin=vmins[i], vmax=vmaxs[i])
        if overlays[i] is not None:
            xz_overlay = reorient_slice(overlays[i][:,xyz[1],:],1)
            ax.imshow(xz_overlay, alpha=overlay_alpha, cmap=overlay_cmap)
        if xyz_lines:
            # add crosshair lines
            l = mlines.Line2D([xz_slice.shape[0]-xyz[0],xz_slice.shape[0]-xyz[0]],
                              [xyz_pad,xz_slice.shape[0]-xyz_pad],
                              color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
            ax.add_line(l)
            l = mlines.Line2D([xyz_pad,xz_slice.shape[1]-xyz_pad],
                              [xz_slice.shape[1]-xyz[2],xz_slice.shape[1]-xyz[2]],
                              color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
            ax.add_line(l)
        ax.axis('off')
        ####################
        ####################
        # third panel: slice at fixed z
        xy_slice = reorient_slice(images[i][:,:,xyz[2]],2)
        if not transpose:
            ax = plt.subplot(gs[i,2])
        else:
            ax = plt.subplot(gs[2,i])
        ax.imshow(xy_slice, cmap=cmap, alpha=alpha, vmin=vmins[i], vmax=vmaxs[i])
        if overlays[i] is not None:
            xy_overlay = reorient_slice(overlays[i][:,:,xyz[2]],2)
            ax.imshow(xy_overlay, alpha=overlay_alpha, cmap=overlay_cmap)
        if xyz_lines:
            # add crosshair lines
            l = mlines.Line2D([xy_slice.shape[0]-xyz[0],xy_slice.shape[0]-xyz[0]],
                              [xyz_pad,xy_slice.shape[0]-xyz_pad],
                              color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
            ax.add_line(l)
            l = mlines.Line2D([xyz_pad,xy_slice.shape[1]-xyz_pad],
                              [xy_slice.shape[1]-xyz[1],xy_slice.shape[1]-xyz[1]],
                              color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
            ax.add_line(l)
        ax.axis('off')
    ####################
    ####################
    if filename is not None:
        plt.savefig(filename, dpi=dpi, transparent=transparent)
        plt.close(fig)
    else:
        plt.show()
    # turn warnings back to default
    warnings.simplefilter('default')
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> ch2 = ants.image_read(ants.get_data('ch2'))
>>> ants.plot_ortho_stack([mni,mni,mni]) | Below is the the instruction that describes the task:
### Input:
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> ch2 = ants.image_read(ants.get_data('ch2'))
>>> ants.plot_ortho_stack([mni,mni,mni])
### Response:
def plot_ortho_stack(images, overlays=None, reorient=True,
# xyz arguments
xyz=None, xyz_lines=False, xyz_color='red', xyz_alpha=0.6, xyz_linewidth=2, xyz_pad=5,
# base image arguments
cmap='Greys_r', alpha=1,
# overlay arguments
overlay_cmap='jet', overlay_alpha=0.9,
# background arguments
black_bg=True, bg_thresh_quant=0.01, bg_val_quant=0.99,
# scale/crop/domain arguments
crop=False, scale=False, domain_image_map=None,
# title arguments
title=None, titlefontsize=24, title_dx=0, title_dy=0,
# 4th panel text arguemnts
text=None, textfontsize=24, textfontcolor='white', text_dx=0, text_dy=0,
# save & size arguments
filename=None, dpi=500, figsize=1., colpad=0, rowpad=0,
transpose=False, transparent=True):
"""
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> ch2 = ants.image_read(ants.get_data('ch2'))
>>> ants.plot_ortho_stack([mni,mni,mni])
"""
def mirror_matrix(x):
return x[::-1,:]
def rotate270_matrix(x):
return mirror_matrix(x.T)
def reorient_slice(x, axis):
return rotate270_matrix(x)
# need this hack because of a weird NaN warning from matplotlib with overlays
warnings.simplefilter('ignore')
n_images = len(images)
# handle `image` argument
for i in range(n_images):
if isinstance(images[i], str):
images[i] = iio2.image_read(images[i])
if not isinstance(images[i], iio.ANTsImage):
raise ValueError('image argument must be an ANTsImage')
if images[i].dimension != 3:
raise ValueError('Input image must have 3 dimensions!')
if overlays is None:
overlays = [None]*n_images
# handle `overlay` argument
for i in range(n_images):
if overlays[i] is not None:
if isinstance(overlays[i], str):
overlays[i] = iio2.image_read(overlays[i])
if not isinstance(overlays[i], iio.ANTsImage):
raise ValueError('overlay argument must be an ANTsImage')
if overlays[i].dimension != 3:
raise ValueError('Overlay image must have 3 dimensions!')
if not iio.image_physical_space_consistency(images[i], overlays[i]):
overlays[i] = reg.resample_image_to_target(overlays[i], images[i], interp_type='linear')
for i in range(1,n_images):
if not iio.image_physical_space_consistency(images[0], images[i]):
images[i] = reg.resample_image_to_target(images[0], images[i], interp_type='linear')
# reorient images
if reorient != False:
if reorient == True:
reorient = 'RPI'
for i in range(n_images):
images[i] = images[i].reorient_image2(reorient)
if overlays[i] is not None:
overlays[i] = overlays[i].reorient_image2(reorient)
# handle `slices` argument
if xyz is None:
xyz = [int(s/2) for s in images[0].shape]
for i in range(3):
if xyz[i] is None:
xyz[i] = int(images[0].shape[i]/2)
# resample image if spacing is very unbalanced
spacing = [s for i,s in enumerate(images[0].spacing)]
if (max(spacing) / min(spacing)) > 3.:
new_spacing = (1,1,1)
for i in range(n_images):
images[i] = images[i].resample_image(tuple(new_spacing))
if overlays[i] is not None:
overlays[i] = overlays[i].resample_image(tuple(new_spacing))
xyz = [int(sl*(sold/snew)) for sl,sold,snew in zip(xyz,spacing,new_spacing)]
# potentially crop image
if crop:
for i in range(n_images):
plotmask = images[i].get_mask(cleanup=0)
if plotmask.max() == 0:
plotmask += 1
images[i] = images[i].crop_image(plotmask)
if overlays[i] is not None:
overlays[i] = overlays[i].crop_image(plotmask)
# pad images
for i in range(n_images):
if i == 0:
images[i], lowpad, uppad = images[i].pad_image(return_padvals=True)
else:
images[i] = images[i].pad_image()
if overlays[i] is not None:
overlays[i] = overlays[i].pad_image()
xyz = [v+l for v,l in zip(xyz,lowpad)]
# handle `domain_image_map` argument
if domain_image_map is not None:
if isinstance(domain_image_map, iio.ANTsImage):
tx = tio2.new_ants_transform(precision='float', transform_type='AffineTransform',
dimension=3)
for i in range(n_images):
images[i] = tio.apply_ants_transform_to_image(tx, images[i], domain_image_map)
if overlays[i] is not None:
overlays[i] = tio.apply_ants_transform_to_image(tx, overlays[i],
domain_image_map,
interpolation='linear')
elif isinstance(domain_image_map, (list, tuple)):
# expect an image and transformation
if len(domain_image_map) != 2:
raise ValueError('domain_image_map list or tuple must have length == 2')
dimg = domain_image_map[0]
if not isinstance(dimg, iio.ANTsImage):
raise ValueError('domain_image_map first entry should be ANTsImage')
tx = domain_image_map[1]
for i in range(n_images):
images[i] = reg.apply_transforms(dimg, images[i], transform_list=tx)
if overlays[i] is not None:
overlays[i] = reg.apply_transforms(dimg, overlays[i], transform_list=tx,
interpolator='linear')
# potentially find dynamic range
if scale == True:
vmins = []
vmaxs = []
for i in range(n_images):
vmin, vmax = images[i].quantile((0.05,0.95))
vmins.append(vmin)
vmaxs.append(vmax)
elif isinstance(scale, (list,tuple)):
if len(scale) != 2:
raise ValueError('scale argument must be boolean or list/tuple with two values')
vmins = []
vmaxs = []
for i in range(n_images):
vmin, vmax = images[i].quantile(scale)
vmins.append(vmin)
vmaxs.append(vmax)
else:
vmin = None
vmax = None
if not transpose:
nrow = n_images
ncol = 3
else:
nrow = 3
ncol = n_images
fig = plt.figure(figsize=((ncol+1)*2.5*figsize, (nrow+1)*2.5*figsize))
if title is not None:
basey = 0.93
basex = 0.5
fig.suptitle(title, fontsize=titlefontsize, x=basex+title_dx, y=basey+title_dy)
if (colpad > 0) and (rowpad > 0):
bothgridpad = max(colpad, rowpad)
colpad = 0
rowpad = 0
else:
bothgridpad = 0.0
gs = gridspec.GridSpec(nrow, ncol, wspace=bothgridpad, hspace=0.0,
top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1) + colpad,
left=0.5/(ncol+1) + rowpad, right=1-0.5/(ncol+1))
# pad image to have isotropic array dimensions
for i in range(n_images):
images[i] = images[i].numpy()
if overlays[i] is not None:
overlays[i] = overlays[i].numpy()
overlays[i][np.abs(overlays[i]) == 0] = np.nan
####################
####################
for i in range(n_images):
yz_slice = reorient_slice(images[i][xyz[0],:,:],0)
if not transpose:
ax = plt.subplot(gs[i,0])
else:
ax = plt.subplot(gs[0,i])
ax.imshow(yz_slice, cmap=cmap, vmin=vmin, vmax=vmax)
if overlays[i] is not None:
yz_overlay = reorient_slice(overlays[i][xyz[0],:,:],0)
ax.imshow(yz_overlay, alpha=overlay_alpha, cmap=overlay_cmap)
if xyz_lines:
# add lines
l = mlines.Line2D([yz_slice.shape[0]-xyz[1],yz_slice.shape[0]-xyz[1]],
[xyz_pad,yz_slice.shape[0]-xyz_pad],
color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
ax.add_line(l)
l = mlines.Line2D([xyz_pad,yz_slice.shape[1]-xyz_pad],
[yz_slice.shape[1]-xyz[2],yz_slice.shape[1]-xyz[2]],
color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
ax.add_line(l)
ax.axis('off')
####################
####################
xz_slice = reorient_slice(images[i][:,xyz[1],:],1)
if not transpose:
ax = plt.subplot(gs[i,1])
else:
ax = plt.subplot(gs[1,i])
ax.imshow(xz_slice, cmap=cmap, vmin=vmin, vmax=vmax)
if overlays[i] is not None:
xz_overlay = reorient_slice(overlays[i][:,xyz[1],:],1)
ax.imshow(xz_overlay, alpha=overlay_alpha, cmap=overlay_cmap)
if xyz_lines:
# add lines
l = mlines.Line2D([xz_slice.shape[0]-xyz[0],xz_slice.shape[0]-xyz[0]],
[xyz_pad,xz_slice.shape[0]-xyz_pad],
color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
ax.add_line(l)
l = mlines.Line2D([xyz_pad,xz_slice.shape[1]-xyz_pad],
[xz_slice.shape[1]-xyz[2],xz_slice.shape[1]-xyz[2]],
color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
ax.add_line(l)
ax.axis('off')
####################
####################
xy_slice = reorient_slice(images[i][:,:,xyz[2]],2)
if not transpose:
ax = plt.subplot(gs[i,2])
else:
ax = plt.subplot(gs[2,i])
ax.imshow(xy_slice, cmap=cmap, vmin=vmin, vmax=vmax)
if overlays[i] is not None:
xy_overlay = reorient_slice(overlays[i][:,:,xyz[2]],2)
ax.imshow(xy_overlay, alpha=overlay_alpha, cmap=overlay_cmap)
if xyz_lines:
# add lines
l = mlines.Line2D([xy_slice.shape[0]-xyz[0],xy_slice.shape[0]-xyz[0]],
[xyz_pad,xy_slice.shape[0]-xyz_pad],
color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
ax.add_line(l)
l = mlines.Line2D([xyz_pad,xy_slice.shape[1]-xyz_pad],
[xy_slice.shape[1]-xyz[1],xy_slice.shape[1]-xyz[1]],
color=xyz_color, alpha=xyz_alpha, linewidth=xyz_linewidth)
ax.add_line(l)
ax.axis('off')
####################
####################
if filename is not None:
plt.savefig(filename, dpi=dpi, transparent=transparent)
plt.close(fig)
else:
plt.show()
# turn warnings back to default
warnings.simplefilter('default') |
def extract_graphs_and_lemmas_from_tweets(tweet_generator):
"""
Given a tweet python generator, we encode the information into mention and retweet graphs and a lemma matrix.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- retweet_graph: The retweet graph as a SciPy sparse matrix.
- user_lemma_matrix: The user lemma vector representation matrix as a SciPy sparse matrix.
- tweet_id_set: A python set containing the Twitter ids for all the dataset tweets.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- lemma_to_attribute: A map from lemmas to numbers in python dictionary format.
"""
####################################################################################################################
# Prepare for iterating over tweets.
####################################################################################################################
# These are initialized as lists for incremental extension.
tweet_id_set = set()
user_id_set = list()
add_tweet_id = tweet_id_set.add
append_user_id = user_id_set.append
# Initialize sparse matrix arrays.
mention_graph_row = list()
mention_graph_col = list()
retweet_graph_row = list()
retweet_graph_col = list()
user_lemma_matrix_row = list()
user_lemma_matrix_col = list()
user_lemma_matrix_data = list()
append_mention_graph_row = mention_graph_row.append
append_mention_graph_col = mention_graph_col.append
append_retweet_graph_row = retweet_graph_row.append
append_retweet_graph_col = retweet_graph_col.append
extend_user_lemma_matrix_row = user_lemma_matrix_row.extend
extend_user_lemma_matrix_col = user_lemma_matrix_col.extend
extend_user_lemma_matrix_data = user_lemma_matrix_data.extend
# Initialize dictionaries.
id_to_node = dict()
id_to_name = dict()
id_to_username = dict()
id_to_listedcount = dict()
lemma_to_attribute = dict()
sent_tokenize, _treebank_word_tokenize = get_tokenizer()
# tagger = HunposTagger('hunpos-1.0-linux/english.model', 'hunpos-1.0-linux/hunpos-tag')
# tagger = PerceptronTagger()
tagger = get_braupt_tagger()
lemmatizer, lemmatize = get_lemmatizer("wordnet")
stopset = get_stopset()
first_cap_re, all_cap_re = get_camel_case_regexes()
digits_punctuation_whitespace_re = get_digits_punctuation_whitespace_regex()
pos_set = get_pos_set()
####################################################################################################################
# Iterate over tweets.
####################################################################################################################
counter = 0
for tweet in tweet_generator:
# print(tweet)
# Increment tweet counter.
counter += 1
# if counter % 10000 == 0:
# print(counter)
# print(counter)
# Extract base tweet's values.
try:
tweet_id = tweet["id"]
user_id = tweet["user"]["id"]
user_name = tweet["user"]["name"]
user_screen_name = tweet["user"]["screen_name"]
listed_count_raw = tweet["user"]["listed_count"]
tweet_text = tweet["text"]
tweet_in_reply_to_user_id = tweet["in_reply_to_user_id"]
tweet_in_reply_to_screen_name = tweet["in_reply_to_screen_name"]
tweet_entities_user_mentions = tweet["entities"]["user_mentions"]
if "retweeted_status" not in tweet.keys():
user_mention_id_list = list()
user_mention_screen_name_list = list()
for user_mention in tweet_entities_user_mentions:
user_mention_id_list.append(user_mention["id"])
user_mention_screen_name_list.append(user_mention["screen_name"])
else:
# Extract base tweet's values.
original_tweet = tweet["retweeted_status"]
original_tweet_id = original_tweet["id"]
original_tweet_user_id = original_tweet["user"]["id"]
original_tweet_user_name = original_tweet["user"]["name"]
original_tweet_user_screen_name = original_tweet["user"]["screen_name"]
listed_count_raw = original_tweet["user"]["listed_count"]
original_tweet_text = original_tweet["text"]
original_tweet_in_reply_to_user_id = original_tweet["in_reply_to_user_id"]
original_tweet_in_reply_to_screen_name = original_tweet["in_reply_to_screen_name"]
original_tweet_entities_user_mentions = original_tweet["entities"]["user_mentions"]
original_tweet_user_mention_id_list = list()
original_tweet_user_mention_screen_name_list = list()
for user_mention in original_tweet_entities_user_mentions:
original_tweet_user_mention_id_list.append(user_mention["id"])
original_tweet_user_mention_screen_name_list.append(user_mention["screen_name"])
except KeyError:
continue
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
source_node = id_to_node.setdefault(user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[user_id] = 0
else:
id_to_listedcount[user_id] = int(listed_count_raw)
# Update sets, lists and dictionaries.
add_tweet_id(tweet_id)
id_to_name[user_id] = user_screen_name
id_to_username[user_id] = user_name
append_user_id(user_id)
################################################################################################################
# We are dealing with an original tweet.
################################################################################################################
if "retweeted_status" not in tweet.keys():
############################################################################################################
# Update user-lemma frequency matrix.
############################################################################################################
# Extract lemmas from the text.
tweet_lemmas, lemma_to_keywordbag = clean_document(tweet_text, sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set)
number_of_lemmas = len(tweet_lemmas)
# Update the user-lemma frequency matrix one-by-one.
attribute_list = list()
append_attribute = attribute_list.append
for lemma in tweet_lemmas:
# Map lemmas to distinct integer numbers.
vocabulary_size = len(lemma_to_attribute)
attribute = lemma_to_attribute.setdefault(lemma, vocabulary_size)
append_attribute(attribute)
# Add values to the sparse matrix arrays.
extend_user_lemma_matrix_row(number_of_lemmas*[source_node])
extend_user_lemma_matrix_col(attribute_list)
extend_user_lemma_matrix_data(number_of_lemmas*[1.0])
############################################################################################################
# Update mention matrix.
############################################################################################################
# Get mentioned user ids.
mentioned_user_id_set = list()
if tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(tweet_in_reply_to_user_id)
id_to_name[tweet_in_reply_to_user_id] = tweet_in_reply_to_screen_name
for user_mention, mentioned_user_id, mentioned_user_screen_name in zip(tweet_entities_user_mentions,
user_mention_id_list,
user_mention_screen_name_list):
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = mentioned_user_screen_name
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
################################################################################################################
# We are dealing with a retweet.
################################################################################################################
else:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
original_tweet_node = id_to_node.setdefault(original_tweet_user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[user_id] = 0
else:
id_to_listedcount[user_id] = int(listed_count_raw)
# Update retweet graph.
append_retweet_graph_row(source_node)
append_retweet_graph_col(original_tweet_node)
# Extract lemmas from the text.
tweet_lemmas, lemma_to_keywordbag = clean_document(original_tweet_text, sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set)
number_of_lemmas = len(tweet_lemmas)
# Update the user-lemma frequency matrix one-by-one.
attribute_list = list()
append_attribute = attribute_list.append
for lemma in tweet_lemmas:
# Map lemmas to distinct integer numbers.
vocabulary_size = len(lemma_to_attribute)
attribute = lemma_to_attribute.setdefault(lemma, vocabulary_size)
append_attribute(attribute)
# Add values to the sparse matrix arrays.
extend_user_lemma_matrix_row(number_of_lemmas*[source_node])
extend_user_lemma_matrix_col(attribute_list)
extend_user_lemma_matrix_data(number_of_lemmas*[1.0])
# Get mentioned user ids.
mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention, mentioned_user_id, mentioned_user_screen_name in zip(original_tweet_entities_user_mentions,
original_tweet_user_mention_id_list,
original_tweet_user_mention_screen_name_list):
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = mentioned_user_screen_name
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Get mentioned user ids.
retweet_mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
retweet_mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention, mentioned_user_id, mentioned_user_screen_name in zip(original_tweet_entities_user_mentions,
original_tweet_user_mention_id_list,
original_tweet_user_mention_screen_name_list):
retweet_mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = mentioned_user_screen_name
# We remove duplicates.
retweet_mentioned_user_id_set = set(retweet_mentioned_user_id_set)
mentioned_user_id_set.update(retweet_mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
# This is the first time we deal with this tweet.
if original_tweet_id not in tweet_id_set:
# Update sets, lists and dictionaries.
add_tweet_id(original_tweet_id)
id_to_name[original_tweet_user_id] = original_tweet_user_screen_name
id_to_username[original_tweet_user_id] = original_tweet_user_name
append_user_id(original_tweet_user_id)
########################################################################################################
# Update user-lemma frequency matrix.
########################################################################################################
# Update the user-lemma frequency matrix one-by-one.
attribute_list = list()
append_attribute = attribute_list.append
for lemma in tweet_lemmas:
# Map lemmas to distinct integer numbers.
vocabulary_size = len(lemma_to_attribute)
attribute = lemma_to_attribute.setdefault(lemma, vocabulary_size)
append_attribute(attribute)
# Add values to the sparse matrix arrays.
extend_user_lemma_matrix_row(number_of_lemmas*[source_node])
extend_user_lemma_matrix_col(attribute_list)
extend_user_lemma_matrix_data(number_of_lemmas*[1.0])
########################################################################################################
# Update mention matrix.
########################################################################################################
# Update the mention graph one-by-one.
for mentioned_user_id in retweet_mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(original_tweet_node)
append_mention_graph_col(mention_target_node)
else:
pass
####################################################################################################################
# Final steps of preprocessing tweets.
####################################################################################################################
# Discard any duplicates.
user_id_set = set(user_id_set)
number_of_users = len(user_id_set)
# min_number_of_users = max(user_id_set) + 1
# Form mention graph adjacency matrix.
mention_graph_row = np.array(mention_graph_row, dtype=np.int64)
mention_graph_col = np.array(mention_graph_col, dtype=np.int64)
mention_graph_data = np.ones_like(mention_graph_row, dtype=np.float64)
mention_graph = spsp.coo_matrix((mention_graph_data, (mention_graph_row, mention_graph_col)),
shape=(number_of_users, number_of_users))
mention_graph = spsp.coo_matrix(spsp.csr_matrix(mention_graph))
# Form retweet graph adjacency matrix.
retweet_graph_row = np.array(retweet_graph_row, dtype=np.int64)
retweet_graph_col = np.array(retweet_graph_col, dtype=np.int64)
retweet_graph_data = np.ones_like(retweet_graph_row, dtype=np.float64)
retweet_graph = spsp.coo_matrix((retweet_graph_data, (retweet_graph_row, retweet_graph_col)),
shape=(number_of_users, number_of_users))
retweet_graph = spsp.coo_matrix(spsp.csr_matrix(retweet_graph))
# Form user-lemma matrix.
number_of_lemmas = len(lemma_to_attribute)
user_lemma_matrix_row = np.array(user_lemma_matrix_row, dtype=np.int64)
user_lemma_matrix_col = np.array(user_lemma_matrix_col, dtype=np.int64)
user_lemma_matrix_data = np.array(user_lemma_matrix_data, dtype=np.float64)
user_lemma_matrix = spsp.coo_matrix((user_lemma_matrix_data, (user_lemma_matrix_row, user_lemma_matrix_col)),
shape=(number_of_users, number_of_lemmas))
user_lemma_matrix = spsp.coo_matrix(spsp.csr_matrix(user_lemma_matrix))
node_to_id = dict(zip(id_to_node.values(), id_to_node.keys()))
# tagger.close()
return mention_graph, retweet_graph, user_lemma_matrix, tweet_id_set, user_id_set, node_to_id, lemma_to_attribute, id_to_name, id_to_username, id_to_listedcount | Given a tweet python generator, we encode the information into mention and retweet graphs and a lemma matrix.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- retweet_graph: The retweet graph as a SciPy sparse matrix.
- user_lemma_matrix: The user lemma vector representation matrix as a SciPy sparse matrix.
- tweet_id_set: A python set containing the Twitter ids for all the dataset tweets.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- lemma_to_attribute: A map from lemmas to numbers in python dictionary format. | Below is the the instruction that describes the task:
### Input:
Given a tweet python generator, we encode the information into mention and retweet graphs and a lemma matrix.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- retweet_graph: The retweet graph as a SciPy sparse matrix.
- user_lemma_matrix: The user lemma vector representation matrix as a SciPy sparse matrix.
- tweet_id_set: A python set containing the Twitter ids for all the dataset tweets.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- lemma_to_attribute: A map from lemmas to numbers in python dictionary format.
### Response:
def extract_graphs_and_lemmas_from_tweets(tweet_generator):
"""
Given a tweet python generator, we encode the information into mention and retweet graphs and a lemma matrix.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- retweet_graph: The retweet graph as a SciPy sparse matrix.
- user_lemma_matrix: The user lemma vector representation matrix as a SciPy sparse matrix.
- tweet_id_set: A python set containing the Twitter ids for all the dataset tweets.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- lemma_to_attribute: A map from lemmas to numbers in python dictionary format.
"""
####################################################################################################################
# Prepare for iterating over tweets.
####################################################################################################################
# These are initialized as lists for incremental extension.
tweet_id_set = set()
user_id_set = list()
add_tweet_id = tweet_id_set.add
append_user_id = user_id_set.append
# Initialize sparse matrix arrays.
mention_graph_row = list()
mention_graph_col = list()
retweet_graph_row = list()
retweet_graph_col = list()
user_lemma_matrix_row = list()
user_lemma_matrix_col = list()
user_lemma_matrix_data = list()
append_mention_graph_row = mention_graph_row.append
append_mention_graph_col = mention_graph_col.append
append_retweet_graph_row = retweet_graph_row.append
append_retweet_graph_col = retweet_graph_col.append
extend_user_lemma_matrix_row = user_lemma_matrix_row.extend
extend_user_lemma_matrix_col = user_lemma_matrix_col.extend
extend_user_lemma_matrix_data = user_lemma_matrix_data.extend
# Initialize dictionaries.
id_to_node = dict()
id_to_name = dict()
id_to_username = dict()
id_to_listedcount = dict()
lemma_to_attribute = dict()
sent_tokenize, _treebank_word_tokenize = get_tokenizer()
# tagger = HunposTagger('hunpos-1.0-linux/english.model', 'hunpos-1.0-linux/hunpos-tag')
# tagger = PerceptronTagger()
tagger = get_braupt_tagger()
lemmatizer, lemmatize = get_lemmatizer("wordnet")
stopset = get_stopset()
first_cap_re, all_cap_re = get_camel_case_regexes()
digits_punctuation_whitespace_re = get_digits_punctuation_whitespace_regex()
pos_set = get_pos_set()
####################################################################################################################
# Iterate over tweets.
####################################################################################################################
counter = 0
for tweet in tweet_generator:
# print(tweet)
# Increment tweet counter.
counter += 1
# if counter % 10000 == 0:
# print(counter)
# print(counter)
# Extract base tweet's values.
try:
tweet_id = tweet["id"]
user_id = tweet["user"]["id"]
user_name = tweet["user"]["name"]
user_screen_name = tweet["user"]["screen_name"]
listed_count_raw = tweet["user"]["listed_count"]
tweet_text = tweet["text"]
tweet_in_reply_to_user_id = tweet["in_reply_to_user_id"]
tweet_in_reply_to_screen_name = tweet["in_reply_to_screen_name"]
tweet_entities_user_mentions = tweet["entities"]["user_mentions"]
if "retweeted_status" not in tweet.keys():
user_mention_id_list = list()
user_mention_screen_name_list = list()
for user_mention in tweet_entities_user_mentions:
user_mention_id_list.append(user_mention["id"])
user_mention_screen_name_list.append(user_mention["screen_name"])
else:
# Extract base tweet's values.
original_tweet = tweet["retweeted_status"]
original_tweet_id = original_tweet["id"]
original_tweet_user_id = original_tweet["user"]["id"]
original_tweet_user_name = original_tweet["user"]["name"]
original_tweet_user_screen_name = original_tweet["user"]["screen_name"]
listed_count_raw = original_tweet["user"]["listed_count"]
original_tweet_text = original_tweet["text"]
original_tweet_in_reply_to_user_id = original_tweet["in_reply_to_user_id"]
original_tweet_in_reply_to_screen_name = original_tweet["in_reply_to_screen_name"]
original_tweet_entities_user_mentions = original_tweet["entities"]["user_mentions"]
original_tweet_user_mention_id_list = list()
original_tweet_user_mention_screen_name_list = list()
for user_mention in original_tweet_entities_user_mentions:
original_tweet_user_mention_id_list.append(user_mention["id"])
original_tweet_user_mention_screen_name_list.append(user_mention["screen_name"])
except KeyError:
continue
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
source_node = id_to_node.setdefault(user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[user_id] = 0
else:
id_to_listedcount[user_id] = int(listed_count_raw)
# Update sets, lists and dictionaries.
add_tweet_id(tweet_id)
id_to_name[user_id] = user_screen_name
id_to_username[user_id] = user_name
append_user_id(user_id)
################################################################################################################
# We are dealing with an original tweet.
################################################################################################################
if "retweeted_status" not in tweet.keys():
############################################################################################################
# Update user-lemma frequency matrix.
############################################################################################################
# Extract lemmas from the text.
tweet_lemmas, lemma_to_keywordbag = clean_document(tweet_text, sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set)
number_of_lemmas = len(tweet_lemmas)
# Update the user-lemma frequency matrix one-by-one.
attribute_list = list()
append_attribute = attribute_list.append
for lemma in tweet_lemmas:
# Map lemmas to distinct integer numbers.
vocabulary_size = len(lemma_to_attribute)
attribute = lemma_to_attribute.setdefault(lemma, vocabulary_size)
append_attribute(attribute)
# Add values to the sparse matrix arrays.
extend_user_lemma_matrix_row(number_of_lemmas*[source_node])
extend_user_lemma_matrix_col(attribute_list)
extend_user_lemma_matrix_data(number_of_lemmas*[1.0])
############################################################################################################
# Update mention matrix.
############################################################################################################
# Get mentioned user ids.
mentioned_user_id_set = list()
if tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(tweet_in_reply_to_user_id)
id_to_name[tweet_in_reply_to_user_id] = tweet_in_reply_to_screen_name
for user_mention, mentioned_user_id, mentioned_user_screen_name in zip(tweet_entities_user_mentions,
user_mention_id_list,
user_mention_screen_name_list):
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = mentioned_user_screen_name
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
################################################################################################################
# We are dealing with a retweet.
################################################################################################################
else:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
original_tweet_node = id_to_node.setdefault(original_tweet_user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[user_id] = 0
else:
id_to_listedcount[user_id] = int(listed_count_raw)
# Update retweet graph.
append_retweet_graph_row(source_node)
append_retweet_graph_col(original_tweet_node)
# Extract lemmas from the text.
tweet_lemmas, lemma_to_keywordbag = clean_document(original_tweet_text, sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set)
number_of_lemmas = len(tweet_lemmas)
# Update the user-lemma frequency matrix one-by-one.
attribute_list = list()
append_attribute = attribute_list.append
for lemma in tweet_lemmas:
# Map lemmas to distinct integer numbers.
vocabulary_size = len(lemma_to_attribute)
attribute = lemma_to_attribute.setdefault(lemma, vocabulary_size)
append_attribute(attribute)
# Add values to the sparse matrix arrays.
extend_user_lemma_matrix_row(number_of_lemmas*[source_node])
extend_user_lemma_matrix_col(attribute_list)
extend_user_lemma_matrix_data(number_of_lemmas*[1.0])
# Get mentioned user ids.
mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention, mentioned_user_id, mentioned_user_screen_name in zip(original_tweet_entities_user_mentions,
original_tweet_user_mention_id_list,
original_tweet_user_mention_screen_name_list):
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = mentioned_user_screen_name
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Get mentioned user ids.
retweet_mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
retweet_mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention, mentioned_user_id, mentioned_user_screen_name in zip(original_tweet_entities_user_mentions,
original_tweet_user_mention_id_list,
original_tweet_user_mention_screen_name_list):
retweet_mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = mentioned_user_screen_name
# We remove duplicates.
retweet_mentioned_user_id_set = set(retweet_mentioned_user_id_set)
mentioned_user_id_set.update(retweet_mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
# This is the first time we deal with this tweet.
if original_tweet_id not in tweet_id_set:
# Update sets, lists and dictionaries.
add_tweet_id(original_tweet_id)
id_to_name[original_tweet_user_id] = original_tweet_user_screen_name
id_to_username[original_tweet_user_id] = original_tweet_user_name
append_user_id(original_tweet_user_id)
########################################################################################################
# Update user-lemma frequency matrix.
########################################################################################################
# Update the user-lemma frequency matrix one-by-one.
attribute_list = list()
append_attribute = attribute_list.append
for lemma in tweet_lemmas:
# Map lemmas to distinct integer numbers.
vocabulary_size = len(lemma_to_attribute)
attribute = lemma_to_attribute.setdefault(lemma, vocabulary_size)
append_attribute(attribute)
# Add values to the sparse matrix arrays.
extend_user_lemma_matrix_row(number_of_lemmas*[source_node])
extend_user_lemma_matrix_col(attribute_list)
extend_user_lemma_matrix_data(number_of_lemmas*[1.0])
########################################################################################################
# Update mention matrix.
########################################################################################################
# Update the mention graph one-by-one.
for mentioned_user_id in retweet_mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(original_tweet_node)
append_mention_graph_col(mention_target_node)
else:
pass
####################################################################################################################
# Final steps of preprocessing tweets.
####################################################################################################################
# Discard any duplicates.
user_id_set = set(user_id_set)
number_of_users = len(user_id_set)
# min_number_of_users = max(user_id_set) + 1
# Form mention graph adjacency matrix.
mention_graph_row = np.array(mention_graph_row, dtype=np.int64)
mention_graph_col = np.array(mention_graph_col, dtype=np.int64)
mention_graph_data = np.ones_like(mention_graph_row, dtype=np.float64)
mention_graph = spsp.coo_matrix((mention_graph_data, (mention_graph_row, mention_graph_col)),
shape=(number_of_users, number_of_users))
mention_graph = spsp.coo_matrix(spsp.csr_matrix(mention_graph))
# Form retweet graph adjacency matrix.
retweet_graph_row = np.array(retweet_graph_row, dtype=np.int64)
retweet_graph_col = np.array(retweet_graph_col, dtype=np.int64)
retweet_graph_data = np.ones_like(retweet_graph_row, dtype=np.float64)
retweet_graph = spsp.coo_matrix((retweet_graph_data, (retweet_graph_row, retweet_graph_col)),
shape=(number_of_users, number_of_users))
retweet_graph = spsp.coo_matrix(spsp.csr_matrix(retweet_graph))
# Form user-lemma matrix.
number_of_lemmas = len(lemma_to_attribute)
user_lemma_matrix_row = np.array(user_lemma_matrix_row, dtype=np.int64)
user_lemma_matrix_col = np.array(user_lemma_matrix_col, dtype=np.int64)
user_lemma_matrix_data = np.array(user_lemma_matrix_data, dtype=np.float64)
user_lemma_matrix = spsp.coo_matrix((user_lemma_matrix_data, (user_lemma_matrix_row, user_lemma_matrix_col)),
shape=(number_of_users, number_of_lemmas))
user_lemma_matrix = spsp.coo_matrix(spsp.csr_matrix(user_lemma_matrix))
node_to_id = dict(zip(id_to_node.values(), id_to_node.keys()))
# tagger.close()
return mention_graph, retweet_graph, user_lemma_matrix, tweet_id_set, user_id_set, node_to_id, lemma_to_attribute, id_to_name, id_to_username, id_to_listedcount |
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
def decorator(func):
def wrapper(**kargs):
for key, value in six.iteritems(vkargs):
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(**kargs)
return wrapper
return decorator | Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403). | Below is the the instruction that describes the task:
### Input:
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
### Response:
def validate(**vkargs):
"""
Validates and manipulates keyword arguments by user defined callables.
Handles ValueError and missing arguments by raising HTTPError(403).
"""
def decorator(func):
def wrapper(**kargs):
for key, value in six.iteritems(vkargs):
if key not in kargs:
abort(403, 'Missing parameter: %s' % key)
try:
kargs[key] = value(kargs[key])
except ValueError:
abort(403, 'Wrong parameter format for: %s' % key)
return func(**kargs)
return wrapper
return decorator |
def _maybe_validate_perm(perm, validate_args, name=None):
"""Checks that `perm` is valid."""
with tf.name_scope(name or 'maybe_validate_perm'):
assertions = []
if not dtype_util.is_integer(perm.dtype):
raise TypeError('`perm` must be integer type')
msg = '`perm` must be a vector.'
if tensorshape_util.rank(perm.shape) is not None:
if tensorshape_util.rank(perm.shape) != 1:
raise ValueError(
msg[:-1] +
', saw rank: {}.'.format(tensorshape_util.rank(perm.shape)))
elif validate_args:
assertions += [assert_util.assert_rank(perm, 1, message=msg)]
perm_ = tf.get_static_value(perm)
msg = '`perm` must be a valid permutation vector.'
if perm_ is not None:
if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
elif validate_args:
assertions += [
assert_util.assert_equal(
tf.sort(perm), tf.range(tf.size(input=perm)), message=msg)
]
return assertions | Checks that `perm` is valid. | Below is the the instruction that describes the task:
### Input:
Checks that `perm` is valid.
### Response:
def _maybe_validate_perm(perm, validate_args, name=None):
"""Checks that `perm` is valid."""
with tf.name_scope(name or 'maybe_validate_perm'):
assertions = []
if not dtype_util.is_integer(perm.dtype):
raise TypeError('`perm` must be integer type')
msg = '`perm` must be a vector.'
if tensorshape_util.rank(perm.shape) is not None:
if tensorshape_util.rank(perm.shape) != 1:
raise ValueError(
msg[:-1] +
', saw rank: {}.'.format(tensorshape_util.rank(perm.shape)))
elif validate_args:
assertions += [assert_util.assert_rank(perm, 1, message=msg)]
perm_ = tf.get_static_value(perm)
msg = '`perm` must be a valid permutation vector.'
if perm_ is not None:
if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
elif validate_args:
assertions += [
assert_util.assert_equal(
tf.sort(perm), tf.range(tf.size(input=perm)), message=msg)
]
return assertions |
def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None):
"""
Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.
"""
from django.apps import apps
class Meta:
proxy = True
app_label = 'cmsplugin_cascade'
name = str(name + 'Model')
try:
Model = apps.get_registered_model(Meta.app_label, name)
except LookupError:
bases = model_mixins + (base_model,)
attrs = dict(attrs or {}, Meta=Meta, __module__=module)
Model = type(name, bases, attrs)
fake_proxy_models[name] = bases
return Model | Create a Django Proxy Model on the fly, to be used by any Cascade Plugin. | Below is the the instruction that describes the task:
### Input:
Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.
### Response:
def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None):
"""
Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.
"""
from django.apps import apps
class Meta:
proxy = True
app_label = 'cmsplugin_cascade'
name = str(name + 'Model')
try:
Model = apps.get_registered_model(Meta.app_label, name)
except LookupError:
bases = model_mixins + (base_model,)
attrs = dict(attrs or {}, Meta=Meta, __module__=module)
Model = type(name, bases, attrs)
fake_proxy_models[name] = bases
return Model |
def identity(requestContext, name, step=60):
"""
Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age
Example::
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == t.
Accepts optional second argument as 'step' parameter (default step is
60 sec)
"""
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
values = range(start, end, step)
series = TimeSeries(name, start, end, step, values)
series.pathExpression = 'identity("%s")' % name
return [series] | Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age
Example::
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == t.
Accepts optional second argument as 'step' parameter (default step is
60 sec) | Below is the the instruction that describes the task:
### Input:
Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age
Example::
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == t.
Accepts optional second argument as 'step' parameter (default step is
60 sec)
### Response:
def identity(requestContext, name, step=60):
"""
Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age
Example::
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == t.
Accepts optional second argument as 'step' parameter (default step is
60 sec)
"""
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
values = range(start, end, step)
series = TimeSeries(name, start, end, step, values)
series.pathExpression = 'identity("%s")' % name
return [series] |
def extract(query_dict, prefix=""):
"""
Extract the *order_by*, *per_page*, and *page* parameters from
`query_dict` (a Django QueryDict), and return a dict suitable for
instantiating a preconfigured Table object.
"""
strs = ['order_by']
ints = ['per_page', 'page']
extracted = { }
for key in (strs + ints):
if (prefix + key) in query_dict:
val = query_dict.get(prefix + key)
extracted[key] = (val
if not key in ints
else int(val))
return extracted | Extract the *order_by*, *per_page*, and *page* parameters from
`query_dict` (a Django QueryDict), and return a dict suitable for
instantiating a preconfigured Table object. | Below is the the instruction that describes the task:
### Input:
Extract the *order_by*, *per_page*, and *page* parameters from
`query_dict` (a Django QueryDict), and return a dict suitable for
instantiating a preconfigured Table object.
### Response:
def extract(query_dict, prefix=""):
"""
Extract the *order_by*, *per_page*, and *page* parameters from
`query_dict` (a Django QueryDict), and return a dict suitable for
instantiating a preconfigured Table object.
"""
strs = ['order_by']
ints = ['per_page', 'page']
extracted = { }
for key in (strs + ints):
if (prefix + key) in query_dict:
val = query_dict.get(prefix + key)
extracted[key] = (val
if not key in ints
else int(val))
return extracted |
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler:
"""
Return connection handler instance for the endpoint
:param session: AIOHTTP client session instance
:param proxy: Proxy url
:return:
"""
if self.server:
return ConnectionHandler("https", "wss", self.server, self.port, self.path, session, proxy)
elif self.ipv6:
return ConnectionHandler("https", "wss", "[{0}]".format(self.ipv6), self.port, self.path, session, proxy)
return ConnectionHandler("https", "wss", self.ipv4, self.port, self.path, session, proxy) | Return connection handler instance for the endpoint
:param session: AIOHTTP client session instance
:param proxy: Proxy url
:return: | Below is the the instruction that describes the task:
### Input:
Return connection handler instance for the endpoint
:param session: AIOHTTP client session instance
:param proxy: Proxy url
:return:
### Response:
def conn_handler(self, session: ClientSession, proxy: str = None) -> ConnectionHandler:
"""
Return connection handler instance for the endpoint
:param session: AIOHTTP client session instance
:param proxy: Proxy url
:return:
"""
if self.server:
return ConnectionHandler("https", "wss", self.server, self.port, self.path, session, proxy)
elif self.ipv6:
return ConnectionHandler("https", "wss", "[{0}]".format(self.ipv6), self.port, self.path, session, proxy)
return ConnectionHandler("https", "wss", self.ipv4, self.port, self.path, session, proxy) |
def _get_triplet(dd):
"""Return a triplet from a dialogue dictionary.
:param dd: Dialogue dictionary.
:type dd: Dict[str, str]
:return: (query, response, error response)
:rtype: (str, str | NoResponse, str | NoResponse)
"""
return _s(dd['q']), _s(dd.get('r', NoResponse)), _s(dd.get('e', NoResponse)) | Return a triplet from a dialogue dictionary.
:param dd: Dialogue dictionary.
:type dd: Dict[str, str]
:return: (query, response, error response)
:rtype: (str, str | NoResponse, str | NoResponse) | Below is the the instruction that describes the task:
### Input:
Return a triplet from a dialogue dictionary.
:param dd: Dialogue dictionary.
:type dd: Dict[str, str]
:return: (query, response, error response)
:rtype: (str, str | NoResponse, str | NoResponse)
### Response:
def _get_triplet(dd):
"""Return a triplet from a dialogue dictionary.
:param dd: Dialogue dictionary.
:type dd: Dict[str, str]
:return: (query, response, error response)
:rtype: (str, str | NoResponse, str | NoResponse)
"""
return _s(dd['q']), _s(dd.get('r', NoResponse)), _s(dd.get('e', NoResponse)) |
def fix_multiclass_predict_proba(y_proba, # type: np.ndarray
seen_classes,
complete_classes
):
# type: (...) -> np.ndarray
"""
Add missing columns to predict_proba result.
When a multiclass classifier is fit on a dataset which only contains
a subset of possible classes its predict_proba result only has columns
corresponding to seen classes. This function adds missing columns.
"""
assert set(complete_classes) >= set(seen_classes)
y_proba_fixed = np.zeros(
shape=(y_proba.shape[0], len(complete_classes)),
dtype=y_proba.dtype,
)
class_mapping = np.searchsorted(complete_classes, seen_classes)
y_proba_fixed[:, class_mapping] = y_proba
return y_proba_fixed | Add missing columns to predict_proba result.
When a multiclass classifier is fit on a dataset which only contains
a subset of possible classes its predict_proba result only has columns
corresponding to seen classes. This function adds missing columns. | Below is the the instruction that describes the task:
### Input:
Add missing columns to predict_proba result.
When a multiclass classifier is fit on a dataset which only contains
a subset of possible classes its predict_proba result only has columns
corresponding to seen classes. This function adds missing columns.
### Response:
def fix_multiclass_predict_proba(y_proba, # type: np.ndarray
seen_classes,
complete_classes
):
# type: (...) -> np.ndarray
"""
Add missing columns to predict_proba result.
When a multiclass classifier is fit on a dataset which only contains
a subset of possible classes its predict_proba result only has columns
corresponding to seen classes. This function adds missing columns.
"""
assert set(complete_classes) >= set(seen_classes)
y_proba_fixed = np.zeros(
shape=(y_proba.shape[0], len(complete_classes)),
dtype=y_proba.dtype,
)
class_mapping = np.searchsorted(complete_classes, seen_classes)
y_proba_fixed[:, class_mapping] = y_proba
return y_proba_fixed |
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
"""Get jobs from a queue."""
jobs_json_string = self._run_script(
self._get_jobs_from_queue,
self._to_namespaced(queue),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
JobStatus.RUNNING.value,
max_jobs
)
jobs = json.loads(jobs_json_string.decode())
jobs = [Job.deserialize(job) for job in jobs]
return jobs | Get jobs from a queue. | Below is the the instruction that describes the task:
### Input:
Get jobs from a queue.
### Response:
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
"""Get jobs from a queue."""
jobs_json_string = self._run_script(
self._get_jobs_from_queue,
self._to_namespaced(queue),
self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
JobStatus.RUNNING.value,
max_jobs
)
jobs = json.loads(jobs_json_string.decode())
jobs = [Job.deserialize(job) for job in jobs]
return jobs |
def changeGroupImageRemote(self, image_url, thread_id=None):
"""
Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed
"""
(image_id, mimetype), = self._upload(get_files_from_urls([image_url]))
return self._changeGroupImage(image_id, thread_id) | Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed | Below is the the instruction that describes the task:
### Input:
Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed
### Response:
def changeGroupImageRemote(self, image_url, thread_id=None):
"""
Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed
"""
(image_id, mimetype), = self._upload(get_files_from_urls([image_url]))
return self._changeGroupImage(image_id, thread_id) |
def add(self, campaign_id, item_id, default_price, title, img_url, nick=None):
'''xxxxx.xxxxx.adgroup.add
===================================
创建一个推广组'''
request = TOPRequest('xxxxx.xxxxx.adgroup.add')
request['campaign_id'] = campaign_id
request['item_id'] = item_id
request['default_price'] = default_price
request['title'] = title
request['img_url'] = img_url
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':ADGroup})
return self.result | xxxxx.xxxxx.adgroup.add
===================================
创建一个推广组 | Below is the the instruction that describes the task:
### Input:
xxxxx.xxxxx.adgroup.add
===================================
创建一个推广组
### Response:
def add(self, campaign_id, item_id, default_price, title, img_url, nick=None):
'''xxxxx.xxxxx.adgroup.add
===================================
创建一个推广组'''
request = TOPRequest('xxxxx.xxxxx.adgroup.add')
request['campaign_id'] = campaign_id
request['item_id'] = item_id
request['default_price'] = default_price
request['title'] = title
request['img_url'] = img_url
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':ADGroup})
return self.result |
def in_same_table(self):
"""True if both cells provided to constructor are in same table."""
if self._tc.tbl is self._other_tc.tbl:
return True
return False | True if both cells provided to constructor are in same table. | Below is the the instruction that describes the task:
### Input:
True if both cells provided to constructor are in same table.
### Response:
def in_same_table(self):
"""True if both cells provided to constructor are in same table."""
if self._tc.tbl is self._other_tc.tbl:
return True
return False |
def close_connection(self, address, cause):
"""
Closes the connection with given address.
:param address: (:class:`~hazelcast.core.Address`), address of the connection to be closed.
:param cause: (Exception), the cause for closing the connection.
:return: (bool), ``true`` if the connection is closed, ``false`` otherwise.
"""
try:
connection = self.connections[address]
connection.close(cause)
except KeyError:
self.logger.warning("No connection with %s was found to close.", address, extra=self._logger_extras)
return False | Closes the connection with given address.
:param address: (:class:`~hazelcast.core.Address`), address of the connection to be closed.
:param cause: (Exception), the cause for closing the connection.
:return: (bool), ``true`` if the connection is closed, ``false`` otherwise. | Below is the the instruction that describes the task:
### Input:
Closes the connection with given address.
:param address: (:class:`~hazelcast.core.Address`), address of the connection to be closed.
:param cause: (Exception), the cause for closing the connection.
:return: (bool), ``true`` if the connection is closed, ``false`` otherwise.
### Response:
def close_connection(self, address, cause):
"""
Closes the connection with given address.
:param address: (:class:`~hazelcast.core.Address`), address of the connection to be closed.
:param cause: (Exception), the cause for closing the connection.
:return: (bool), ``true`` if the connection is closed, ``false`` otherwise.
"""
try:
connection = self.connections[address]
connection.close(cause)
except KeyError:
self.logger.warning("No connection with %s was found to close.", address, extra=self._logger_extras)
return False |
def get_shelvesets(self, request_data=None, top=None, skip=None):
"""GetShelvesets.
Return a collection of shallow shelveset references.
:param :class:`<TfvcShelvesetRequestData> <azure.devops.v5_0.tfvc.models.TfvcShelvesetRequestData>` request_data: name, owner, and maxCommentLength
:param int top: Max number of shelvesets to return
:param int skip: Number of shelvesets to skip
:rtype: [TfvcShelvesetRef]
"""
query_parameters = {}
if request_data is not None:
if request_data.name is not None:
query_parameters['requestData.name'] = request_data.name
if request_data.owner is not None:
query_parameters['requestData.owner'] = request_data.owner
if request_data.max_comment_length is not None:
query_parameters['requestData.maxCommentLength'] = request_data.max_comment_length
if request_data.max_change_count is not None:
query_parameters['requestData.maxChangeCount'] = request_data.max_change_count
if request_data.include_details is not None:
query_parameters['requestData.includeDetails'] = request_data.include_details
if request_data.include_work_items is not None:
query_parameters['requestData.includeWorkItems'] = request_data.include_work_items
if request_data.include_links is not None:
query_parameters['requestData.includeLinks'] = request_data.include_links
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='e36d44fb-e907-4b0a-b194-f83f1ed32ad3',
version='5.0',
query_parameters=query_parameters)
return self._deserialize('[TfvcShelvesetRef]', self._unwrap_collection(response)) | GetShelvesets.
Return a collection of shallow shelveset references.
:param :class:`<TfvcShelvesetRequestData> <azure.devops.v5_0.tfvc.models.TfvcShelvesetRequestData>` request_data: name, owner, and maxCommentLength
:param int top: Max number of shelvesets to return
:param int skip: Number of shelvesets to skip
:rtype: [TfvcShelvesetRef] | Below is the the instruction that describes the task:
### Input:
GetShelvesets.
Return a collection of shallow shelveset references.
:param :class:`<TfvcShelvesetRequestData> <azure.devops.v5_0.tfvc.models.TfvcShelvesetRequestData>` request_data: name, owner, and maxCommentLength
:param int top: Max number of shelvesets to return
:param int skip: Number of shelvesets to skip
:rtype: [TfvcShelvesetRef]
### Response:
def get_shelvesets(self, request_data=None, top=None, skip=None):
"""GetShelvesets.
Return a collection of shallow shelveset references.
:param :class:`<TfvcShelvesetRequestData> <azure.devops.v5_0.tfvc.models.TfvcShelvesetRequestData>` request_data: name, owner, and maxCommentLength
:param int top: Max number of shelvesets to return
:param int skip: Number of shelvesets to skip
:rtype: [TfvcShelvesetRef]
"""
query_parameters = {}
if request_data is not None:
if request_data.name is not None:
query_parameters['requestData.name'] = request_data.name
if request_data.owner is not None:
query_parameters['requestData.owner'] = request_data.owner
if request_data.max_comment_length is not None:
query_parameters['requestData.maxCommentLength'] = request_data.max_comment_length
if request_data.max_change_count is not None:
query_parameters['requestData.maxChangeCount'] = request_data.max_change_count
if request_data.include_details is not None:
query_parameters['requestData.includeDetails'] = request_data.include_details
if request_data.include_work_items is not None:
query_parameters['requestData.includeWorkItems'] = request_data.include_work_items
if request_data.include_links is not None:
query_parameters['requestData.includeLinks'] = request_data.include_links
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='e36d44fb-e907-4b0a-b194-f83f1ed32ad3',
version='5.0',
query_parameters=query_parameters)
return self._deserialize('[TfvcShelvesetRef]', self._unwrap_collection(response)) |
def validate_fields(self, **kwargs):
""" ensures that all incoming fields are the types that were specified """
for field in self.fields:
value = kwargs[field]
required_type = self.fields[field]
if type(value) != required_type:
raise TypeError('{}.{} needs to be a {}, recieved: {}({})'.format(
self.name,
field,
required_type.__name__,
type(value).__name__,
value.__repr__())) | ensures that all incoming fields are the types that were specified | Below is the the instruction that describes the task:
### Input:
ensures that all incoming fields are the types that were specified
### Response:
def validate_fields(self, **kwargs):
""" ensures that all incoming fields are the types that were specified """
for field in self.fields:
value = kwargs[field]
required_type = self.fields[field]
if type(value) != required_type:
raise TypeError('{}.{} needs to be a {}, recieved: {}({})'.format(
self.name,
field,
required_type.__name__,
type(value).__name__,
value.__repr__())) |
def getXlogStatus(self):
"""Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items.
"""
inRecovery = None
if self.checkVersion('9.0'):
inRecovery = self._simpleQuery("SELECT pg_is_in_recovery();")
cur = self._conn.cursor()
if inRecovery:
cols = ['pg_last_xlog_receive_location()',
'pg_last_xlog_replay_location()',]
headers = ['xlog_receive_location',
'xlog_replay_location',]
if self.checkVersion('9.1'):
cols.extend(['pg_last_xact_replay_timestamp()',
'pg_is_xlog_replay_paused()',])
headers.extend(['xact_replay_timestamp',
'xlog_replay_paused',])
cur.execute("""SELECT %s;""" % ','.join(cols))
headers = ('xlog_receive_location', 'xlog_replay_location')
else:
cur.execute("""SELECT
pg_current_xlog_location(),
pg_xlogfile_name(pg_current_xlog_location());""")
headers = ('xlog_location', 'xlog_filename')
row = cur.fetchone()
info_dict = dict(zip(headers, row))
if inRecovery is not None:
info_dict['in_recovery'] = inRecovery
return info_dict | Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items. | Below is the the instruction that describes the task:
### Input:
Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items.
### Response:
def getXlogStatus(self):
"""Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items.
"""
inRecovery = None
if self.checkVersion('9.0'):
inRecovery = self._simpleQuery("SELECT pg_is_in_recovery();")
cur = self._conn.cursor()
if inRecovery:
cols = ['pg_last_xlog_receive_location()',
'pg_last_xlog_replay_location()',]
headers = ['xlog_receive_location',
'xlog_replay_location',]
if self.checkVersion('9.1'):
cols.extend(['pg_last_xact_replay_timestamp()',
'pg_is_xlog_replay_paused()',])
headers.extend(['xact_replay_timestamp',
'xlog_replay_paused',])
cur.execute("""SELECT %s;""" % ','.join(cols))
headers = ('xlog_receive_location', 'xlog_replay_location')
else:
cur.execute("""SELECT
pg_current_xlog_location(),
pg_xlogfile_name(pg_current_xlog_location());""")
headers = ('xlog_location', 'xlog_filename')
row = cur.fetchone()
info_dict = dict(zip(headers, row))
if inRecovery is not None:
info_dict['in_recovery'] = inRecovery
return info_dict |
def _set_action(self, v, load=False):
"""
Setter method for action, mapped from YANG variable /rule/action (rule-action)
If this variable is read-only (config: false) in the
source YANG file, then _set_action is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_action() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'accept': {}, u'reject': {}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Action for the command', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='rule-action', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """action must be of a type compatible with rule-action""",
'defined-type': "brocade-aaa:rule-action",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'accept': {}, u'reject': {}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Action for the command', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='rule-action', is_config=True)""",
})
self.__action = t
if hasattr(self, '_set'):
self._set() | Setter method for action, mapped from YANG variable /rule/action (rule-action)
If this variable is read-only (config: false) in the
source YANG file, then _set_action is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_action() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for action, mapped from YANG variable /rule/action (rule-action)
If this variable is read-only (config: false) in the
source YANG file, then _set_action is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_action() directly.
### Response:
def _set_action(self, v, load=False):
"""
Setter method for action, mapped from YANG variable /rule/action (rule-action)
If this variable is read-only (config: false) in the
source YANG file, then _set_action is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_action() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'accept': {}, u'reject': {}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Action for the command', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='rule-action', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """action must be of a type compatible with rule-action""",
'defined-type': "brocade-aaa:rule-action",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'accept': {}, u'reject': {}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Action for the command', u'cli-optional-in-sequence': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='rule-action', is_config=True)""",
})
self.__action = t
if hasattr(self, '_set'):
self._set() |
def set_basefilemtime(self):
"""Set attributes mtimestamp and mtimefs. If the global list
ORIGINEXTENSIONS include any items, try and look for files (in
the directory where self.filename is sitting) with the same base
name as the loaded file, but with an extension specified in
ORIGINEXTENSIONS.
mtimestamp is a timestamp and mtimefs is the file (name) with
that timestamp.
ORIGINEXTENSIONS is empty on delivery. Which means that the
attributes discussed will be based on the file that was loaded,
(unless ORIGINEXTENSIONS is populated before this call).
This is supposed to be a convenience in cases the data file
loaded is some sort of "exported" file format, and the original
file creation time is of interest.
.. note::
If the provided functions in this module is used to get a
pack, this method does not have to be called. It is called by
those functions.
"""
dirpath = os.path.split(self.filename)[0]
name = os.path.basename(self.fs).split('.')[0]
for ext in ORIGINEXTENSIONS: # This should be some user configuration.
res = glob.glob(dirpath + '/' + name + '.' + ext)
if res: # Assume first match is valid.
# If some shell patterns will be used later
self.mtimefs = os.path.normpath(res[0])
# Time stamp string:
self.mtimestamp = time.ctime(os.path.getmtime(self.mtimefs))
break
else:
self.mtimefs = self.filename
self.mtimestamp = time.ctime(os.path.getmtime(self.mtimefs)) | Set attributes mtimestamp and mtimefs. If the global list
ORIGINEXTENSIONS include any items, try and look for files (in
the directory where self.filename is sitting) with the same base
name as the loaded file, but with an extension specified in
ORIGINEXTENSIONS.
mtimestamp is a timestamp and mtimefs is the file (name) with
that timestamp.
ORIGINEXTENSIONS is empty on delivery. Which means that the
attributes discussed will be based on the file that was loaded,
(unless ORIGINEXTENSIONS is populated before this call).
This is supposed to be a convenience in cases the data file
loaded is some sort of "exported" file format, and the original
file creation time is of interest.
.. note::
If the provided functions in this module is used to get a
pack, this method does not have to be called. It is called by
those functions. | Below is the the instruction that describes the task:
### Input:
Set attributes mtimestamp and mtimefs. If the global list
ORIGINEXTENSIONS include any items, try and look for files (in
the directory where self.filename is sitting) with the same base
name as the loaded file, but with an extension specified in
ORIGINEXTENSIONS.
mtimestamp is a timestamp and mtimefs is the file (name) with
that timestamp.
ORIGINEXTENSIONS is empty on delivery. Which means that the
attributes discussed will be based on the file that was loaded,
(unless ORIGINEXTENSIONS is populated before this call).
This is supposed to be a convenience in cases the data file
loaded is some sort of "exported" file format, and the original
file creation time is of interest.
.. note::
If the provided functions in this module is used to get a
pack, this method does not have to be called. It is called by
those functions.
### Response:
def set_basefilemtime(self):
"""Set attributes mtimestamp and mtimefs. If the global list
ORIGINEXTENSIONS include any items, try and look for files (in
the directory where self.filename is sitting) with the same base
name as the loaded file, but with an extension specified in
ORIGINEXTENSIONS.
mtimestamp is a timestamp and mtimefs is the file (name) with
that timestamp.
ORIGINEXTENSIONS is empty on delivery. Which means that the
attributes discussed will be based on the file that was loaded,
(unless ORIGINEXTENSIONS is populated before this call).
This is supposed to be a convenience in cases the data file
loaded is some sort of "exported" file format, and the original
file creation time is of interest.
.. note::
If the provided functions in this module is used to get a
pack, this method does not have to be called. It is called by
those functions.
"""
dirpath = os.path.split(self.filename)[0]
name = os.path.basename(self.fs).split('.')[0]
for ext in ORIGINEXTENSIONS: # This should be some user configuration.
res = glob.glob(dirpath + '/' + name + '.' + ext)
if res: # Assume first match is valid.
# If some shell patterns will be used later
self.mtimefs = os.path.normpath(res[0])
# Time stamp string:
self.mtimestamp = time.ctime(os.path.getmtime(self.mtimefs))
break
else:
self.mtimefs = self.filename
self.mtimestamp = time.ctime(os.path.getmtime(self.mtimefs)) |
def get_ratings(self):
"""get_ratings()
Returns a Vote QuerySet for this rating field."""
return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key) | get_ratings()
Returns a Vote QuerySet for this rating field. | Below is the the instruction that describes the task:
### Input:
get_ratings()
Returns a Vote QuerySet for this rating field.
### Response:
def get_ratings(self):
"""get_ratings()
Returns a Vote QuerySet for this rating field."""
return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key) |
def list2html(lst):
"""
convert a list to html using table formatting
"""
txt = '<TABLE width=100% border=0>'
for l in lst:
txt += '<TR>\n'
if type(l) is str:
txt+= '<TD>' + l + '</TD>\n'
elif type(l) is list:
txt+= '<TD>'
for i in l:
txt += i + ', '
txt+= '</TD>'
else:
txt+= '<TD>' + str(l) + '</TD>\n'
txt += '</TR>\n'
txt += '</TABLE><BR>\n'
return txt | convert a list to html using table formatting | Below is the the instruction that describes the task:
### Input:
convert a list to html using table formatting
### Response:
def list2html(lst):
"""
convert a list to html using table formatting
"""
txt = '<TABLE width=100% border=0>'
for l in lst:
txt += '<TR>\n'
if type(l) is str:
txt+= '<TD>' + l + '</TD>\n'
elif type(l) is list:
txt+= '<TD>'
for i in l:
txt += i + ', '
txt+= '</TD>'
else:
txt+= '<TD>' + str(l) + '</TD>\n'
txt += '</TR>\n'
txt += '</TABLE><BR>\n'
return txt |
def plot_Stokes_diode(dio_cross,diff=True,feedtype='l',**kwargs):
'''
Plots the uncalibrated full stokes spectrum of the noise diode.
Use diff=False to plot both ON and OFF, or diff=True for ON-OFF
'''
#If diff=True, get ON-OFF. If not get ON and OFF separately
if diff==True:
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
else:
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
I,Q,U,V = get_stokes(data,feedtype)
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Plot spectra
if diff==True:
plt.plot(freqs,Idiff,'k-',label='I')
plt.plot(freqs,Qdiff,'r-',label='Q')
plt.plot(freqs,Udiff,'g-',label='U')
plt.plot(freqs,Vdiff,'m-',label='V')
else:
plt.plot(freqs,I_ON,'k-',label='I ON')
plt.plot(freqs,I_OFF,'k--',label='I OFF')
plt.plot(freqs,Q_ON,'r-',label='Q ON')
plt.plot(freqs,Q_OFF,'r--',label='Q OFF')
plt.plot(freqs,U_ON,'g-',label='U ON')
plt.plot(freqs,U_OFF,'g--',label='U OFF')
plt.plot(freqs,V_ON,'m-',label='V ON')
plt.plot(freqs,V_OFF,'m--',label='V OFF')
plt.legend()
plt.xlabel('Frequency (MHz)')
plt.title('Uncalibrated Full Stokes Noise Diode Spectrum')
plt.ylabel('Power (Counts)') | Plots the uncalibrated full stokes spectrum of the noise diode.
Use diff=False to plot both ON and OFF, or diff=True for ON-OFF | Below is the the instruction that describes the task:
### Input:
Plots the uncalibrated full stokes spectrum of the noise diode.
Use diff=False to plot both ON and OFF, or diff=True for ON-OFF
### Response:
def plot_Stokes_diode(dio_cross,diff=True,feedtype='l',**kwargs):
'''
Plots the uncalibrated full stokes spectrum of the noise diode.
Use diff=False to plot both ON and OFF, or diff=True for ON-OFF
'''
#If diff=True, get ON-OFF. If not get ON and OFF separately
if diff==True:
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
else:
obs = Waterfall(dio_cross,max_load=150)
freqs = obs.populate_freqs()
tsamp = obs.header['tsamp']
data = obs.data
I,Q,U,V = get_stokes(data,feedtype)
I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
#Plot spectra
if diff==True:
plt.plot(freqs,Idiff,'k-',label='I')
plt.plot(freqs,Qdiff,'r-',label='Q')
plt.plot(freqs,Udiff,'g-',label='U')
plt.plot(freqs,Vdiff,'m-',label='V')
else:
plt.plot(freqs,I_ON,'k-',label='I ON')
plt.plot(freqs,I_OFF,'k--',label='I OFF')
plt.plot(freqs,Q_ON,'r-',label='Q ON')
plt.plot(freqs,Q_OFF,'r--',label='Q OFF')
plt.plot(freqs,U_ON,'g-',label='U ON')
plt.plot(freqs,U_OFF,'g--',label='U OFF')
plt.plot(freqs,V_ON,'m-',label='V ON')
plt.plot(freqs,V_OFF,'m--',label='V OFF')
plt.legend()
plt.xlabel('Frequency (MHz)')
plt.title('Uncalibrated Full Stokes Noise Diode Spectrum')
plt.ylabel('Power (Counts)') |
def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37',
get_compounds = True):
"""Parse information about variants.
- Adds information about compounds
- Updates the information about compounds if necessary and 'update=True'
Args:
store(scout.adapter.MongoAdapter)
institute_obj(scout.models.Institute)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
update(bool): If variant should be updated in database
genome_build(str)
"""
has_changed = False
compounds = variant_obj.get('compounds', [])
if compounds and get_compounds:
# Check if we need to add compound information
# If it is the first time the case is viewed we fill in some compound information
if 'not_loaded' not in compounds[0]:
new_compounds = store.update_variant_compounds(variant_obj)
variant_obj['compounds'] = new_compounds
has_changed = True
# sort compounds on combined rank score
variant_obj['compounds'] = sorted(variant_obj['compounds'],
key=lambda compound: -compound['combined_score'])
# Update the hgnc symbols if they are incorrect
variant_genes = variant_obj.get('genes')
if variant_genes is not None:
for gene_obj in variant_genes:
# If there is no hgnc id there is nothin we can do
if not gene_obj['hgnc_id']:
continue
# Else we collect the gene object and check the id
if gene_obj.get('hgnc_symbol') is None:
hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build)
if not hgnc_gene:
continue
has_changed = True
gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol']
# We update the variant if some information was missing from loading
# Or if symbold in reference genes have changed
if update and has_changed:
variant_obj = store.update_variant(variant_obj)
variant_obj['comments'] = store.events(institute_obj, case=case_obj,
variant_id=variant_obj['variant_id'], comments=True)
if variant_genes:
variant_obj.update(get_predictions(variant_genes))
if variant_obj.get('category') == 'cancer':
variant_obj.update(get_variant_info(variant_genes))
for compound_obj in compounds:
compound_obj.update(get_predictions(compound_obj.get('genes', [])))
if isinstance(variant_obj.get('acmg_classification'), int):
acmg_code = ACMG_MAP[variant_obj['acmg_classification']]
variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code]
# convert length for SV variants
variant_length = variant_obj.get('length')
variant_obj['length'] = {100000000000: 'inf', -1: 'n.d.'}.get(variant_length, variant_length)
if not 'end_chrom' in variant_obj:
variant_obj['end_chrom'] = variant_obj['chromosome']
return variant_obj | Parse information about variants.
- Adds information about compounds
- Updates the information about compounds if necessary and 'update=True'
Args:
store(scout.adapter.MongoAdapter)
institute_obj(scout.models.Institute)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
update(bool): If variant should be updated in database
genome_build(str) | Below is the the instruction that describes the task:
### Input:
Parse information about variants.
- Adds information about compounds
- Updates the information about compounds if necessary and 'update=True'
Args:
store(scout.adapter.MongoAdapter)
institute_obj(scout.models.Institute)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
update(bool): If variant should be updated in database
genome_build(str)
### Response:
def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37',
get_compounds = True):
"""Parse information about variants.
- Adds information about compounds
- Updates the information about compounds if necessary and 'update=True'
Args:
store(scout.adapter.MongoAdapter)
institute_obj(scout.models.Institute)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
update(bool): If variant should be updated in database
genome_build(str)
"""
has_changed = False
compounds = variant_obj.get('compounds', [])
if compounds and get_compounds:
# Check if we need to add compound information
# If it is the first time the case is viewed we fill in some compound information
if 'not_loaded' not in compounds[0]:
new_compounds = store.update_variant_compounds(variant_obj)
variant_obj['compounds'] = new_compounds
has_changed = True
# sort compounds on combined rank score
variant_obj['compounds'] = sorted(variant_obj['compounds'],
key=lambda compound: -compound['combined_score'])
# Update the hgnc symbols if they are incorrect
variant_genes = variant_obj.get('genes')
if variant_genes is not None:
for gene_obj in variant_genes:
# If there is no hgnc id there is nothin we can do
if not gene_obj['hgnc_id']:
continue
# Else we collect the gene object and check the id
if gene_obj.get('hgnc_symbol') is None:
hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build)
if not hgnc_gene:
continue
has_changed = True
gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol']
# We update the variant if some information was missing from loading
# Or if symbold in reference genes have changed
if update and has_changed:
variant_obj = store.update_variant(variant_obj)
variant_obj['comments'] = store.events(institute_obj, case=case_obj,
variant_id=variant_obj['variant_id'], comments=True)
if variant_genes:
variant_obj.update(get_predictions(variant_genes))
if variant_obj.get('category') == 'cancer':
variant_obj.update(get_variant_info(variant_genes))
for compound_obj in compounds:
compound_obj.update(get_predictions(compound_obj.get('genes', [])))
if isinstance(variant_obj.get('acmg_classification'), int):
acmg_code = ACMG_MAP[variant_obj['acmg_classification']]
variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code]
# convert length for SV variants
variant_length = variant_obj.get('length')
variant_obj['length'] = {100000000000: 'inf', -1: 'n.d.'}.get(variant_length, variant_length)
if not 'end_chrom' in variant_obj:
variant_obj['end_chrom'] = variant_obj['chromosome']
return variant_obj |
def match_time_series(self, timeseries1, timeseries2):
"""Return two lists of the two input time series with matching dates
:param TimeSeries timeseries1: The first timeseries
:param TimeSeries timeseries2: The second timeseries
:return: Two two dimensional lists containing the matched values,
:rtype: two List
"""
time1 = map(lambda item: item[0], timeseries1.to_twodim_list())
time2 = map(lambda item: item[0], timeseries2.to_twodim_list())
matches = filter(lambda x: (x in time1), time2)
listX = filter(lambda x: (x[0] in matches), timeseries1.to_twodim_list())
listY = filter(lambda x: (x[0] in matches), timeseries2.to_twodim_list())
return listX, listY | Return two lists of the two input time series with matching dates
:param TimeSeries timeseries1: The first timeseries
:param TimeSeries timeseries2: The second timeseries
:return: Two two dimensional lists containing the matched values,
:rtype: two List | Below is the the instruction that describes the task:
### Input:
Return two lists of the two input time series with matching dates
:param TimeSeries timeseries1: The first timeseries
:param TimeSeries timeseries2: The second timeseries
:return: Two two dimensional lists containing the matched values,
:rtype: two List
### Response:
def match_time_series(self, timeseries1, timeseries2):
"""Return two lists of the two input time series with matching dates
:param TimeSeries timeseries1: The first timeseries
:param TimeSeries timeseries2: The second timeseries
:return: Two two dimensional lists containing the matched values,
:rtype: two List
"""
time1 = map(lambda item: item[0], timeseries1.to_twodim_list())
time2 = map(lambda item: item[0], timeseries2.to_twodim_list())
matches = filter(lambda x: (x in time1), time2)
listX = filter(lambda x: (x[0] in matches), timeseries1.to_twodim_list())
listY = filter(lambda x: (x[0] in matches), timeseries2.to_twodim_list())
return listX, listY |
def calc_flooddischarge_v1(self):
"""Calculate the discharge during and after a flood event based on an
|anntools.SeasonalANN| describing the relationship(s) between discharge
and water stage.
Required control parameter:
|WaterLevel2FloodDischarge|
Required derived parameter:
|dam_derived.TOY|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|FloodDischarge|
Example:
The control parameter |WaterLevel2FloodDischarge| is derived from
|SeasonalParameter|. This allows to simulate different seasonal
dam control schemes. To show that the seasonal selection mechanism
is implemented properly, we define a short simulation period of
three days:
>>> from hydpy import pub
>>> pub.timegrids = '2001.01.01', '2001.01.04', '1d'
Now we prepare a dam model and define two different relationships
between water level and flood discharge. The first relatively
simple relationship (for January, 2) is based on two neurons
contained in a single hidden layer and is used in the following
example. The second neural network (for January, 3) is not
applied at all, which is why we do not need to assign any parameter
values to it:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> waterlevel2flooddischarge(
... _01_02_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1,
... weights_input=[[50., 4]],
... weights_output=[[2.], [30]],
... intercepts_hidden=[[-13000, -1046]],
... intercepts_output=[0.]),
... _01_03_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1))
>>> derived.toy.update()
>>> model.idx_sim = pub.timegrids.sim['2001.01.02']
The following example shows two distinct effects of both neurons
in the first network. One neuron describes a relatively sharp
increase between 259.8 and 260.2 meters from about 0 to 2 m³/s.
This could describe a release of water through a bottom outlet
controlled by a valve. The add something like an exponential
increase between 260 and 261 meters, which could describe the
uncontrolled flow over a spillway:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_flooddischarge_v1,
... last_example=21,
... parseqs=(aides.waterlevel,
... fluxes.flooddischarge))
>>> test.nexts.waterlevel = numpy.arange(257, 261.1, 0.2)
>>> test()
| ex. | waterlevel | flooddischarge |
-------------------------------------
| 1 | 257.0 | 0.0 |
| 2 | 257.2 | 0.000001 |
| 3 | 257.4 | 0.000002 |
| 4 | 257.6 | 0.000005 |
| 5 | 257.8 | 0.000011 |
| 6 | 258.0 | 0.000025 |
| 7 | 258.2 | 0.000056 |
| 8 | 258.4 | 0.000124 |
| 9 | 258.6 | 0.000275 |
| 10 | 258.8 | 0.000612 |
| 11 | 259.0 | 0.001362 |
| 12 | 259.2 | 0.003031 |
| 13 | 259.4 | 0.006745 |
| 14 | 259.6 | 0.015006 |
| 15 | 259.8 | 0.033467 |
| 16 | 260.0 | 1.074179 |
| 17 | 260.2 | 2.164498 |
| 18 | 260.4 | 2.363853 |
| 19 | 260.6 | 2.79791 |
| 20 | 260.8 | 3.719725 |
| 21 | 261.0 | 5.576088 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
aid = self.sequences.aides.fastaccess
con.waterlevel2flooddischarge.inputs[0] = aid.waterlevel
con.waterlevel2flooddischarge.process_actual_input(der.toy[self.idx_sim])
flu.flooddischarge = con.waterlevel2flooddischarge.outputs[0] | Calculate the discharge during and after a flood event based on an
|anntools.SeasonalANN| describing the relationship(s) between discharge
and water stage.
Required control parameter:
|WaterLevel2FloodDischarge|
Required derived parameter:
|dam_derived.TOY|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|FloodDischarge|
Example:
The control parameter |WaterLevel2FloodDischarge| is derived from
|SeasonalParameter|. This allows to simulate different seasonal
dam control schemes. To show that the seasonal selection mechanism
is implemented properly, we define a short simulation period of
three days:
>>> from hydpy import pub
>>> pub.timegrids = '2001.01.01', '2001.01.04', '1d'
Now we prepare a dam model and define two different relationships
between water level and flood discharge. The first relatively
simple relationship (for January, 2) is based on two neurons
contained in a single hidden layer and is used in the following
example. The second neural network (for January, 3) is not
applied at all, which is why we do not need to assign any parameter
values to it:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> waterlevel2flooddischarge(
... _01_02_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1,
... weights_input=[[50., 4]],
... weights_output=[[2.], [30]],
... intercepts_hidden=[[-13000, -1046]],
... intercepts_output=[0.]),
... _01_03_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1))
>>> derived.toy.update()
>>> model.idx_sim = pub.timegrids.sim['2001.01.02']
The following example shows two distinct effects of both neurons
in the first network. One neuron describes a relatively sharp
increase between 259.8 and 260.2 meters from about 0 to 2 m³/s.
This could describe a release of water through a bottom outlet
controlled by a valve. The add something like an exponential
increase between 260 and 261 meters, which could describe the
uncontrolled flow over a spillway:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_flooddischarge_v1,
... last_example=21,
... parseqs=(aides.waterlevel,
... fluxes.flooddischarge))
>>> test.nexts.waterlevel = numpy.arange(257, 261.1, 0.2)
>>> test()
| ex. | waterlevel | flooddischarge |
-------------------------------------
| 1 | 257.0 | 0.0 |
| 2 | 257.2 | 0.000001 |
| 3 | 257.4 | 0.000002 |
| 4 | 257.6 | 0.000005 |
| 5 | 257.8 | 0.000011 |
| 6 | 258.0 | 0.000025 |
| 7 | 258.2 | 0.000056 |
| 8 | 258.4 | 0.000124 |
| 9 | 258.6 | 0.000275 |
| 10 | 258.8 | 0.000612 |
| 11 | 259.0 | 0.001362 |
| 12 | 259.2 | 0.003031 |
| 13 | 259.4 | 0.006745 |
| 14 | 259.6 | 0.015006 |
| 15 | 259.8 | 0.033467 |
| 16 | 260.0 | 1.074179 |
| 17 | 260.2 | 2.164498 |
| 18 | 260.4 | 2.363853 |
| 19 | 260.6 | 2.79791 |
| 20 | 260.8 | 3.719725 |
| 21 | 261.0 | 5.576088 | | Below is the the instruction that describes the task:
### Input:
Calculate the discharge during and after a flood event based on an
|anntools.SeasonalANN| describing the relationship(s) between discharge
and water stage.
Required control parameter:
|WaterLevel2FloodDischarge|
Required derived parameter:
|dam_derived.TOY|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|FloodDischarge|
Example:
The control parameter |WaterLevel2FloodDischarge| is derived from
|SeasonalParameter|. This allows to simulate different seasonal
dam control schemes. To show that the seasonal selection mechanism
is implemented properly, we define a short simulation period of
three days:
>>> from hydpy import pub
>>> pub.timegrids = '2001.01.01', '2001.01.04', '1d'
Now we prepare a dam model and define two different relationships
between water level and flood discharge. The first relatively
simple relationship (for January, 2) is based on two neurons
contained in a single hidden layer and is used in the following
example. The second neural network (for January, 3) is not
applied at all, which is why we do not need to assign any parameter
values to it:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> waterlevel2flooddischarge(
... _01_02_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1,
... weights_input=[[50., 4]],
... weights_output=[[2.], [30]],
... intercepts_hidden=[[-13000, -1046]],
... intercepts_output=[0.]),
... _01_03_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1))
>>> derived.toy.update()
>>> model.idx_sim = pub.timegrids.sim['2001.01.02']
The following example shows two distinct effects of both neurons
in the first network. One neuron describes a relatively sharp
increase between 259.8 and 260.2 meters from about 0 to 2 m³/s.
This could describe a release of water through a bottom outlet
controlled by a valve. The add something like an exponential
increase between 260 and 261 meters, which could describe the
uncontrolled flow over a spillway:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_flooddischarge_v1,
... last_example=21,
... parseqs=(aides.waterlevel,
... fluxes.flooddischarge))
>>> test.nexts.waterlevel = numpy.arange(257, 261.1, 0.2)
>>> test()
| ex. | waterlevel | flooddischarge |
-------------------------------------
| 1 | 257.0 | 0.0 |
| 2 | 257.2 | 0.000001 |
| 3 | 257.4 | 0.000002 |
| 4 | 257.6 | 0.000005 |
| 5 | 257.8 | 0.000011 |
| 6 | 258.0 | 0.000025 |
| 7 | 258.2 | 0.000056 |
| 8 | 258.4 | 0.000124 |
| 9 | 258.6 | 0.000275 |
| 10 | 258.8 | 0.000612 |
| 11 | 259.0 | 0.001362 |
| 12 | 259.2 | 0.003031 |
| 13 | 259.4 | 0.006745 |
| 14 | 259.6 | 0.015006 |
| 15 | 259.8 | 0.033467 |
| 16 | 260.0 | 1.074179 |
| 17 | 260.2 | 2.164498 |
| 18 | 260.4 | 2.363853 |
| 19 | 260.6 | 2.79791 |
| 20 | 260.8 | 3.719725 |
| 21 | 261.0 | 5.576088 |
### Response:
def calc_flooddischarge_v1(self):
"""Calculate the discharge during and after a flood event based on an
|anntools.SeasonalANN| describing the relationship(s) between discharge
and water stage.
Required control parameter:
|WaterLevel2FloodDischarge|
Required derived parameter:
|dam_derived.TOY|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|FloodDischarge|
Example:
The control parameter |WaterLevel2FloodDischarge| is derived from
|SeasonalParameter|. This allows to simulate different seasonal
dam control schemes. To show that the seasonal selection mechanism
is implemented properly, we define a short simulation period of
three days:
>>> from hydpy import pub
>>> pub.timegrids = '2001.01.01', '2001.01.04', '1d'
Now we prepare a dam model and define two different relationships
between water level and flood discharge. The first relatively
simple relationship (for January, 2) is based on two neurons
contained in a single hidden layer and is used in the following
example. The second neural network (for January, 3) is not
applied at all, which is why we do not need to assign any parameter
values to it:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> waterlevel2flooddischarge(
... _01_02_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1,
... weights_input=[[50., 4]],
... weights_output=[[2.], [30]],
... intercepts_hidden=[[-13000, -1046]],
... intercepts_output=[0.]),
... _01_03_12 = ann(nmb_inputs=1,
... nmb_neurons=(2,),
... nmb_outputs=1))
>>> derived.toy.update()
>>> model.idx_sim = pub.timegrids.sim['2001.01.02']
The following example shows two distinct effects of both neurons
in the first network. One neuron describes a relatively sharp
increase between 259.8 and 260.2 meters from about 0 to 2 m³/s.
This could describe a release of water through a bottom outlet
controlled by a valve. The add something like an exponential
increase between 260 and 261 meters, which could describe the
uncontrolled flow over a spillway:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_flooddischarge_v1,
... last_example=21,
... parseqs=(aides.waterlevel,
... fluxes.flooddischarge))
>>> test.nexts.waterlevel = numpy.arange(257, 261.1, 0.2)
>>> test()
| ex. | waterlevel | flooddischarge |
-------------------------------------
| 1 | 257.0 | 0.0 |
| 2 | 257.2 | 0.000001 |
| 3 | 257.4 | 0.000002 |
| 4 | 257.6 | 0.000005 |
| 5 | 257.8 | 0.000011 |
| 6 | 258.0 | 0.000025 |
| 7 | 258.2 | 0.000056 |
| 8 | 258.4 | 0.000124 |
| 9 | 258.6 | 0.000275 |
| 10 | 258.8 | 0.000612 |
| 11 | 259.0 | 0.001362 |
| 12 | 259.2 | 0.003031 |
| 13 | 259.4 | 0.006745 |
| 14 | 259.6 | 0.015006 |
| 15 | 259.8 | 0.033467 |
| 16 | 260.0 | 1.074179 |
| 17 | 260.2 | 2.164498 |
| 18 | 260.4 | 2.363853 |
| 19 | 260.6 | 2.79791 |
| 20 | 260.8 | 3.719725 |
| 21 | 261.0 | 5.576088 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
aid = self.sequences.aides.fastaccess
con.waterlevel2flooddischarge.inputs[0] = aid.waterlevel
con.waterlevel2flooddischarge.process_actual_input(der.toy[self.idx_sim])
flu.flooddischarge = con.waterlevel2flooddischarge.outputs[0] |
def validate(self):
"""Validate request data before sending it out. Return True/False."""
# check if required_fields aren't present
for field in set(self.required_fields) - set(self.request_data):
if not isinstance(field, string_types):
# field was a collection, iterate over it and check by OR
return bool(set(field) & set(self.request_data))
return False
return True | Validate request data before sending it out. Return True/False. | Below is the the instruction that describes the task:
### Input:
Validate request data before sending it out. Return True/False.
### Response:
def validate(self):
"""Validate request data before sending it out. Return True/False."""
# check if required_fields aren't present
for field in set(self.required_fields) - set(self.request_data):
if not isinstance(field, string_types):
# field was a collection, iterate over it and check by OR
return bool(set(field) & set(self.request_data))
return False
return True |
def find(self, item_id, basic=False, **kwargs):
"""
Get item
:param item_id: Item ID
:param basic: ?
:type item_id: int
:return: Item info
:rtype: dict
"""
if basic:
return self.transport.GET(url='/item/%d/basic' % item_id)
return self.transport.GET(kwargs, url='/item/%d' % item_id) | Get item
:param item_id: Item ID
:param basic: ?
:type item_id: int
:return: Item info
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get item
:param item_id: Item ID
:param basic: ?
:type item_id: int
:return: Item info
:rtype: dict
### Response:
def find(self, item_id, basic=False, **kwargs):
"""
Get item
:param item_id: Item ID
:param basic: ?
:type item_id: int
:return: Item info
:rtype: dict
"""
if basic:
return self.transport.GET(url='/item/%d/basic' % item_id)
return self.transport.GET(kwargs, url='/item/%d' % item_id) |
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
title = obj.Title()
description = obj.Description()
url = obj.absolute_url()
item["replace"]["Title"] = get_link(url, value=title)
item["Description"] = description
department = obj.getDepartment()
if department:
title = department.Title()
url = department.absolute_url()
item["replace"]["Department"] = get_link(url, value=title)
return item | Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item | Below is the the instruction that describes the task:
### Input:
Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
### Response:
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
title = obj.Title()
description = obj.Description()
url = obj.absolute_url()
item["replace"]["Title"] = get_link(url, value=title)
item["Description"] = description
department = obj.getDepartment()
if department:
title = department.Title()
url = department.absolute_url()
item["replace"]["Department"] = get_link(url, value=title)
return item |
def chdir(directory):
"""Change the current working directory.
Args:
directory (str): Directory to go to.
"""
directory = os.path.abspath(directory)
logger.info("chdir -> %s" % directory)
try:
if not os.path.isdir(directory):
logger.error(
"chdir -> %s failed! Directory does not exist!", directory
)
return False
os.chdir(directory)
return True
except Exception as e:
logger.error("chdir -> %s failed! %s" % (directory, e))
return False | Change the current working directory.
Args:
directory (str): Directory to go to. | Below is the the instruction that describes the task:
### Input:
Change the current working directory.
Args:
directory (str): Directory to go to.
### Response:
def chdir(directory):
"""Change the current working directory.
Args:
directory (str): Directory to go to.
"""
directory = os.path.abspath(directory)
logger.info("chdir -> %s" % directory)
try:
if not os.path.isdir(directory):
logger.error(
"chdir -> %s failed! Directory does not exist!", directory
)
return False
os.chdir(directory)
return True
except Exception as e:
logger.error("chdir -> %s failed! %s" % (directory, e))
return False |
def make_sources(comp_key, comp_dict):
"""Make dictionary mapping component keys to a source
or set of sources
Parameters
----------
comp_key : str
Key used to access sources
comp_dict : dict
Information used to build sources
return `OrderedDict` maping comp_key to `fermipy.roi_model.Source`
"""
srcdict = OrderedDict()
try:
comp_info = comp_dict.info
except AttributeError:
comp_info = comp_dict
try:
spectrum = comp_dict.spectrum
except AttributeError:
spectrum = None
model_type = comp_info.model_type
if model_type == 'PointSource':
srcdict[comp_key] = make_point_source(comp_info.source_name,
comp_info.src_dict)
elif model_type == 'SpatialMap':
srcdict[comp_key] = make_spatialmap_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'MapCubeSource':
srcdict[comp_key] = make_mapcube_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'IsoSource':
srcdict[comp_key] = make_isotropic_source(comp_info.source_name,
comp_info.Spectral_Filename,
spectrum)
elif model_type == 'CompositeSource':
srcdict[comp_key] = make_composite_source(comp_info.source_name,
spectrum)
elif model_type == 'CatalogSources':
srcdict.update(make_catalog_sources(comp_info.roi_model,
comp_info.source_names))
else:
raise ValueError("Unrecognized model_type %s" % model_type)
return srcdict | Make dictionary mapping component keys to a source
or set of sources
Parameters
----------
comp_key : str
Key used to access sources
comp_dict : dict
Information used to build sources
return `OrderedDict` maping comp_key to `fermipy.roi_model.Source` | Below is the the instruction that describes the task:
### Input:
Make dictionary mapping component keys to a source
or set of sources
Parameters
----------
comp_key : str
Key used to access sources
comp_dict : dict
Information used to build sources
return `OrderedDict` maping comp_key to `fermipy.roi_model.Source`
### Response:
def make_sources(comp_key, comp_dict):
"""Make dictionary mapping component keys to a source
or set of sources
Parameters
----------
comp_key : str
Key used to access sources
comp_dict : dict
Information used to build sources
return `OrderedDict` maping comp_key to `fermipy.roi_model.Source`
"""
srcdict = OrderedDict()
try:
comp_info = comp_dict.info
except AttributeError:
comp_info = comp_dict
try:
spectrum = comp_dict.spectrum
except AttributeError:
spectrum = None
model_type = comp_info.model_type
if model_type == 'PointSource':
srcdict[comp_key] = make_point_source(comp_info.source_name,
comp_info.src_dict)
elif model_type == 'SpatialMap':
srcdict[comp_key] = make_spatialmap_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'MapCubeSource':
srcdict[comp_key] = make_mapcube_source(comp_info.source_name,
comp_info.Spatial_Filename,
spectrum)
elif model_type == 'IsoSource':
srcdict[comp_key] = make_isotropic_source(comp_info.source_name,
comp_info.Spectral_Filename,
spectrum)
elif model_type == 'CompositeSource':
srcdict[comp_key] = make_composite_source(comp_info.source_name,
spectrum)
elif model_type == 'CatalogSources':
srcdict.update(make_catalog_sources(comp_info.roi_model,
comp_info.source_names))
else:
raise ValueError("Unrecognized model_type %s" % model_type)
return srcdict |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.