code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def __validate_arguments(self):
"""!
@brief Check input arguments of CLIQUE algorithm and if one of them is not correct then appropriate exception
is thrown.
"""
if len(self.__data) == 0:
raise ValueError("Empty input data. Data should contain at least one point.")
if self.__amount_intervals <= 0:
raise ValueError("Incorrect amount of intervals '%d'. Amount of intervals value should be greater than 0." % self.__amount_intervals)
if self.__density_threshold < 0:
raise ValueError("Incorrect density threshold '%f'. Density threshold should not be negative." % self.__density_threshold) | !
@brief Check input arguments of CLIQUE algorithm and if one of them is not correct then appropriate exception
is thrown. | Below is the the instruction that describes the task:
### Input:
!
@brief Check input arguments of CLIQUE algorithm and if one of them is not correct then appropriate exception
is thrown.
### Response:
def __validate_arguments(self):
"""!
@brief Check input arguments of CLIQUE algorithm and if one of them is not correct then appropriate exception
is thrown.
"""
if len(self.__data) == 0:
raise ValueError("Empty input data. Data should contain at least one point.")
if self.__amount_intervals <= 0:
raise ValueError("Incorrect amount of intervals '%d'. Amount of intervals value should be greater than 0." % self.__amount_intervals)
if self.__density_threshold < 0:
raise ValueError("Incorrect density threshold '%f'. Density threshold should not be negative." % self.__density_threshold) |
def addGraphic(self, typ='basic'):
"""
Adds a new graphic to the scene.
:param typ | <str>
:return <XWalkthroughGraphic> || None
"""
cls = XWalkthroughGraphic.find(typ)
if not cls:
return None
graphic = cls()
self.addItem(graphic)
return graphic | Adds a new graphic to the scene.
:param typ | <str>
:return <XWalkthroughGraphic> || None | Below is the the instruction that describes the task:
### Input:
Adds a new graphic to the scene.
:param typ | <str>
:return <XWalkthroughGraphic> || None
### Response:
def addGraphic(self, typ='basic'):
"""
Adds a new graphic to the scene.
:param typ | <str>
:return <XWalkthroughGraphic> || None
"""
cls = XWalkthroughGraphic.find(typ)
if not cls:
return None
graphic = cls()
self.addItem(graphic)
return graphic |
def add_minutes(self, datetimestr, n):
"""Returns a time that n minutes after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of minutes, value can be negative
**中文文档**
返回给定日期N分钟之后的时间。
"""
a_datetime = self.parse_datetime(datetimestr)
return a_datetime + timedelta(seconds=60 * n) | Returns a time that n minutes after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of minutes, value can be negative
**中文文档**
返回给定日期N分钟之后的时间。 | Below is the the instruction that describes the task:
### Input:
Returns a time that n minutes after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of minutes, value can be negative
**中文文档**
返回给定日期N分钟之后的时间。
### Response:
def add_minutes(self, datetimestr, n):
"""Returns a time that n minutes after a time.
:param datetimestr: a datetime object or a datetime str
:param n: number of minutes, value can be negative
**中文文档**
返回给定日期N分钟之后的时间。
"""
a_datetime = self.parse_datetime(datetimestr)
return a_datetime + timedelta(seconds=60 * n) |
def batch_predictions(
self, images, greedy=False, strict=True, return_details=False):
"""Interface to model.batch_predictions for attacks.
Parameters
----------
images : `numpy.ndarray`
Batch of inputs with shape as expected by the model.
greedy : bool
Whether the first adversarial should be returned.
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
if strict:
in_bounds = self.in_bounds(images)
assert in_bounds
self._total_prediction_calls += len(images)
predictions = self.__model.batch_predictions(images)
assert predictions.ndim == 2
assert predictions.shape[0] == images.shape[0]
if return_details:
assert greedy
adversarials = []
for i in range(len(predictions)):
if strict:
in_bounds_i = True
else:
in_bounds_i = self.in_bounds(images[i])
is_adversarial, is_best, distance = self.__is_adversarial(
images[i], predictions[i], in_bounds_i)
if is_adversarial and greedy:
if return_details:
return predictions, is_adversarial, i, is_best, distance
else:
return predictions, is_adversarial, i
adversarials.append(is_adversarial)
if greedy: # pragma: no cover
# no adversarial found
if return_details:
return predictions, False, None, False, None
else:
return predictions, False, None
is_adversarial = np.array(adversarials)
assert is_adversarial.ndim == 1
assert is_adversarial.shape[0] == images.shape[0]
return predictions, is_adversarial | Interface to model.batch_predictions for attacks.
Parameters
----------
images : `numpy.ndarray`
Batch of inputs with shape as expected by the model.
greedy : bool
Whether the first adversarial should be returned.
strict : bool
Controls if the bounds for the pixel values should be checked. | Below is the the instruction that describes the task:
### Input:
Interface to model.batch_predictions for attacks.
Parameters
----------
images : `numpy.ndarray`
Batch of inputs with shape as expected by the model.
greedy : bool
Whether the first adversarial should be returned.
strict : bool
Controls if the bounds for the pixel values should be checked.
### Response:
def batch_predictions(
self, images, greedy=False, strict=True, return_details=False):
"""Interface to model.batch_predictions for attacks.
Parameters
----------
images : `numpy.ndarray`
Batch of inputs with shape as expected by the model.
greedy : bool
Whether the first adversarial should be returned.
strict : bool
Controls if the bounds for the pixel values should be checked.
"""
if strict:
in_bounds = self.in_bounds(images)
assert in_bounds
self._total_prediction_calls += len(images)
predictions = self.__model.batch_predictions(images)
assert predictions.ndim == 2
assert predictions.shape[0] == images.shape[0]
if return_details:
assert greedy
adversarials = []
for i in range(len(predictions)):
if strict:
in_bounds_i = True
else:
in_bounds_i = self.in_bounds(images[i])
is_adversarial, is_best, distance = self.__is_adversarial(
images[i], predictions[i], in_bounds_i)
if is_adversarial and greedy:
if return_details:
return predictions, is_adversarial, i, is_best, distance
else:
return predictions, is_adversarial, i
adversarials.append(is_adversarial)
if greedy: # pragma: no cover
# no adversarial found
if return_details:
return predictions, False, None, False, None
else:
return predictions, False, None
is_adversarial = np.array(adversarials)
assert is_adversarial.ndim == 1
assert is_adversarial.shape[0] == images.shape[0]
return predictions, is_adversarial |
def ang2pix(nside, theta, phi):
r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)
def equatorial(nside, tt, z):
t1 = nside * (0.5 + tt)
t2 = nside * z * 0.75
jp = (t1 - t2).astype('i8')
jm = (t1 + t2).astype('i8')
ir = nside + 1 + jp - jm # in {1, 2n + 1}
kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd
ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}
ip = ip % (4 * nside)
return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip
def polecaps(nside, tt, z, s):
tp = tt - numpy.floor(tt)
za = numpy.abs(z)
tmp = nside * s / ((1 + za) / 3) ** 0.5
mp = za > 0.99
tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5
jp = (tp * tmp).astype('i8')
jm = ((1 - tp) * tmp).astype('i8')
ir = jp + jm + 1
ip = (tt * ir).astype('i8')
ip = ip % (4 * ir)
r1 = 2 * ir * (ir - 1)
r2 = 2 * ir * (ir + 1)
r = numpy.empty_like(r1)
r[z > 0] = r1[z > 0] + ip[z > 0]
r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]
return r
z = numpy.cos(theta)
s = numpy.sin(theta)
tt = (phi / (0.5 * numpy.pi) ) % 4 # in [0, 4]
result = numpy.zeros(z.shape, dtype='i8')
mask = (z < 2. / 3) & (z > -2. / 3)
result[mask] = equatorial(nside[mask], tt[mask], z[mask])
result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])
return result | r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G | Below is the the instruction that describes the task:
### Input:
r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
### Response:
def ang2pix(nside, theta, phi):
r"""Convert angle :math:`\theta` :math:`\phi` to pixel.
This is translated from chealpix.c; but refer to Section 4.1 of
http://adsabs.harvard.edu/abs/2005ApJ...622..759G
"""
nside, theta, phi = numpy.lib.stride_tricks.broadcast_arrays(nside, theta, phi)
def equatorial(nside, tt, z):
t1 = nside * (0.5 + tt)
t2 = nside * z * 0.75
jp = (t1 - t2).astype('i8')
jm = (t1 + t2).astype('i8')
ir = nside + 1 + jp - jm # in {1, 2n + 1}
kshift = 1 - (ir & 1) # kshift=1 if ir even, 0 odd
ip = (jp + jm - nside + kshift + 1) // 2 # in {0, 4n - 1}
ip = ip % (4 * nside)
return nside * (nside - 1) * 2 + (ir - 1) * 4 * nside + ip
def polecaps(nside, tt, z, s):
tp = tt - numpy.floor(tt)
za = numpy.abs(z)
tmp = nside * s / ((1 + za) / 3) ** 0.5
mp = za > 0.99
tmp[mp] = nside[mp] * (3 *(1-za[mp])) ** 0.5
jp = (tp * tmp).astype('i8')
jm = ((1 - tp) * tmp).astype('i8')
ir = jp + jm + 1
ip = (tt * ir).astype('i8')
ip = ip % (4 * ir)
r1 = 2 * ir * (ir - 1)
r2 = 2 * ir * (ir + 1)
r = numpy.empty_like(r1)
r[z > 0] = r1[z > 0] + ip[z > 0]
r[z < 0] = 12 * nside[z < 0] * nside[z < 0] - r2[z < 0] + ip[z < 0]
return r
z = numpy.cos(theta)
s = numpy.sin(theta)
tt = (phi / (0.5 * numpy.pi) ) % 4 # in [0, 4]
result = numpy.zeros(z.shape, dtype='i8')
mask = (z < 2. / 3) & (z > -2. / 3)
result[mask] = equatorial(nside[mask], tt[mask], z[mask])
result[~mask] = polecaps(nside[~mask], tt[~mask], z[~mask], s[~mask])
return result |
def package_repositories(self):
"""
Property for accessing :class:`PackageRepositoryManager` instance, which is used to manage package repos.
:rtype: yagocd.resources.package_repository.PackageRepositoryManager
"""
if self._package_repository_manager is None:
self._package_repository_manager = PackageRepositoryManager(session=self._session)
return self._package_repository_manager | Property for accessing :class:`PackageRepositoryManager` instance, which is used to manage package repos.
:rtype: yagocd.resources.package_repository.PackageRepositoryManager | Below is the the instruction that describes the task:
### Input:
Property for accessing :class:`PackageRepositoryManager` instance, which is used to manage package repos.
:rtype: yagocd.resources.package_repository.PackageRepositoryManager
### Response:
def package_repositories(self):
"""
Property for accessing :class:`PackageRepositoryManager` instance, which is used to manage package repos.
:rtype: yagocd.resources.package_repository.PackageRepositoryManager
"""
if self._package_repository_manager is None:
self._package_repository_manager = PackageRepositoryManager(session=self._session)
return self._package_repository_manager |
def failed(self, fail_on_warnings=True):
"""Returns a boolean value describing whether the validation
succeeded or not."""
return bool(self.errors) or (fail_on_warnings and bool(self.warnings)) | Returns a boolean value describing whether the validation
succeeded or not. | Below is the the instruction that describes the task:
### Input:
Returns a boolean value describing whether the validation
succeeded or not.
### Response:
def failed(self, fail_on_warnings=True):
"""Returns a boolean value describing whether the validation
succeeded or not."""
return bool(self.errors) or (fail_on_warnings and bool(self.warnings)) |
async def get_lease_async(self, partition_id):
"""
Return the lease info for the specified partition.
Can return null if no lease has been created in the store for the specified partition.
:param partition_id: The partition ID.
:type partition_id: str
:return: lease info for the partition, or `None`.
:rtype: ~azure.eventprocessorhost.lease.Lease
"""
try:
blob = await self.host.loop.run_in_executor(
self.executor,
functools.partial(
self.storage_client.get_blob_to_text,
self.lease_container_name, partition_id))
lease = AzureBlobLease()
lease.with_blob(blob)
async def state():
"""
Allow lease to curry storage_client to get state
"""
try:
loop = asyncio.get_event_loop()
res = await loop.run_in_executor(
self.executor,
functools.partial(
self.storage_client.get_blob_properties,
self.lease_container_name,
partition_id))
return res.properties.lease.state
except Exception as err: # pylint: disable=broad-except
_logger.error("Failed to get lease state %r %r", err, partition_id)
lease.state = state
return lease
except Exception as err: # pylint: disable=broad-except
_logger.error("Failed to get lease %r %r", err, partition_id) | Return the lease info for the specified partition.
Can return null if no lease has been created in the store for the specified partition.
:param partition_id: The partition ID.
:type partition_id: str
:return: lease info for the partition, or `None`.
:rtype: ~azure.eventprocessorhost.lease.Lease | Below is the the instruction that describes the task:
### Input:
Return the lease info for the specified partition.
Can return null if no lease has been created in the store for the specified partition.
:param partition_id: The partition ID.
:type partition_id: str
:return: lease info for the partition, or `None`.
:rtype: ~azure.eventprocessorhost.lease.Lease
### Response:
async def get_lease_async(self, partition_id):
"""
Return the lease info for the specified partition.
Can return null if no lease has been created in the store for the specified partition.
:param partition_id: The partition ID.
:type partition_id: str
:return: lease info for the partition, or `None`.
:rtype: ~azure.eventprocessorhost.lease.Lease
"""
try:
blob = await self.host.loop.run_in_executor(
self.executor,
functools.partial(
self.storage_client.get_blob_to_text,
self.lease_container_name, partition_id))
lease = AzureBlobLease()
lease.with_blob(blob)
async def state():
"""
Allow lease to curry storage_client to get state
"""
try:
loop = asyncio.get_event_loop()
res = await loop.run_in_executor(
self.executor,
functools.partial(
self.storage_client.get_blob_properties,
self.lease_container_name,
partition_id))
return res.properties.lease.state
except Exception as err: # pylint: disable=broad-except
_logger.error("Failed to get lease state %r %r", err, partition_id)
lease.state = state
return lease
except Exception as err: # pylint: disable=broad-except
_logger.error("Failed to get lease %r %r", err, partition_id) |
def _load_from_environ(metadata, value_func=None):
"""
Load configuration from environment variables.
Any environment variable prefixed with the metadata's name will be
used to recursively set dictionary keys, splitting on '__'.
:param value_func: a mutator for the envvar's value (if any)
"""
# We'll match the ennvar name against the metadata's name. The ennvar
# name must be uppercase and hyphens in names converted to underscores.
#
# | envar | name | matches? |
# +-------------+---------+----------+
# | FOO_BAR | foo | yes |
# | FOO_BAR | bar | no |
# | foo_bar | bar | no |
# | FOO_BAR_BAZ | foo_bar | yes |
# | FOO_BAR_BAZ | foo-bar | yes |
# +-------------+---------+----------+
prefix = metadata.name.upper().replace("-", "_")
return expand_config(
environ,
separator="__",
skip_to=1,
key_parts_filter=lambda key_parts: len(key_parts) > 1 and key_parts[0] == prefix,
value_func=lambda value: value_func(value) if value_func else value,
) | Load configuration from environment variables.
Any environment variable prefixed with the metadata's name will be
used to recursively set dictionary keys, splitting on '__'.
:param value_func: a mutator for the envvar's value (if any) | Below is the the instruction that describes the task:
### Input:
Load configuration from environment variables.
Any environment variable prefixed with the metadata's name will be
used to recursively set dictionary keys, splitting on '__'.
:param value_func: a mutator for the envvar's value (if any)
### Response:
def _load_from_environ(metadata, value_func=None):
"""
Load configuration from environment variables.
Any environment variable prefixed with the metadata's name will be
used to recursively set dictionary keys, splitting on '__'.
:param value_func: a mutator for the envvar's value (if any)
"""
# We'll match the ennvar name against the metadata's name. The ennvar
# name must be uppercase and hyphens in names converted to underscores.
#
# | envar | name | matches? |
# +-------------+---------+----------+
# | FOO_BAR | foo | yes |
# | FOO_BAR | bar | no |
# | foo_bar | bar | no |
# | FOO_BAR_BAZ | foo_bar | yes |
# | FOO_BAR_BAZ | foo-bar | yes |
# +-------------+---------+----------+
prefix = metadata.name.upper().replace("-", "_")
return expand_config(
environ,
separator="__",
skip_to=1,
key_parts_filter=lambda key_parts: len(key_parts) > 1 and key_parts[0] == prefix,
value_func=lambda value: value_func(value) if value_func else value,
) |
def validateDocument(self, doc):
"""Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidateDocument(self._o, doc__o)
return ret | Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. | Below is the the instruction that describes the task:
### Input:
Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree.
### Response:
def validateDocument(self, doc):
"""Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidateDocument(self._o, doc__o)
return ret |
def reserve_ids(
self, resource, num_ids,
url_prefix, auth, session, send_opts):
"""Reserve a block of unique, sequential ids for annotations.
Args:
resource (intern.resource.Resource): Resource should be an annotation channel.
num_ids (int): Number of ids to reserve.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(int): First id reserved.
Raises:
(TypeError): resource is not a channel or not an annotation channel.
"""
if not isinstance(resource, ChannelResource):
raise TypeError('resource must be ChannelResource')
if resource.type != 'annotation':
raise TypeError('Channel is not an annotation channel')
req = self.get_reserve_request(
resource, 'GET', 'application/json', url_prefix, auth, num_ids)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 200:
json_data = resp.json()
return int(json_data['start_id'])
msg = ('Reserve ids failed on {}, got HTTP response: ({}) - {}'.format(
resource.name, resp.status_code, resp.text))
raise HTTPError(msg, request=req, response=resp) | Reserve a block of unique, sequential ids for annotations.
Args:
resource (intern.resource.Resource): Resource should be an annotation channel.
num_ids (int): Number of ids to reserve.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(int): First id reserved.
Raises:
(TypeError): resource is not a channel or not an annotation channel. | Below is the the instruction that describes the task:
### Input:
Reserve a block of unique, sequential ids for annotations.
Args:
resource (intern.resource.Resource): Resource should be an annotation channel.
num_ids (int): Number of ids to reserve.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(int): First id reserved.
Raises:
(TypeError): resource is not a channel or not an annotation channel.
### Response:
def reserve_ids(
self, resource, num_ids,
url_prefix, auth, session, send_opts):
"""Reserve a block of unique, sequential ids for annotations.
Args:
resource (intern.resource.Resource): Resource should be an annotation channel.
num_ids (int): Number of ids to reserve.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(int): First id reserved.
Raises:
(TypeError): resource is not a channel or not an annotation channel.
"""
if not isinstance(resource, ChannelResource):
raise TypeError('resource must be ChannelResource')
if resource.type != 'annotation':
raise TypeError('Channel is not an annotation channel')
req = self.get_reserve_request(
resource, 'GET', 'application/json', url_prefix, auth, num_ids)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 200:
json_data = resp.json()
return int(json_data['start_id'])
msg = ('Reserve ids failed on {}, got HTTP response: ({}) - {}'.format(
resource.name, resp.status_code, resp.text))
raise HTTPError(msg, request=req, response=resp) |
def check(module):
global passed, failed
'''
apply pylint to the file specified if it is a *.py file
'''
module_name = module.rsplit('/', 1)[1]
if module[-3:] == ".py" and module_name not in IGNORED_FILES:
print ("CHECKING ", module)
pout = os.popen('pylint %s'% module, 'r')
for line in pout:
if "Your code has been rated at" in line:
print ("PASSED pylint inspection: " + line)
passed += 1
return True
if "-error" in line:
print ("FAILED pylint inspection: " + line)
failed += 1
errors.append("FILE: " + module)
errors.append("FAILED pylint inspection: " + line)
return False | apply pylint to the file specified if it is a *.py file | Below is the the instruction that describes the task:
### Input:
apply pylint to the file specified if it is a *.py file
### Response:
def check(module):
global passed, failed
'''
apply pylint to the file specified if it is a *.py file
'''
module_name = module.rsplit('/', 1)[1]
if module[-3:] == ".py" and module_name not in IGNORED_FILES:
print ("CHECKING ", module)
pout = os.popen('pylint %s'% module, 'r')
for line in pout:
if "Your code has been rated at" in line:
print ("PASSED pylint inspection: " + line)
passed += 1
return True
if "-error" in line:
print ("FAILED pylint inspection: " + line)
failed += 1
errors.append("FILE: " + module)
errors.append("FAILED pylint inspection: " + line)
return False |
def get_file_to_stream(self, stream, share_name, directory_name, file_name, **kwargs):
"""
Download a file from Azure File Share.
:param stream: A filehandle to store the file to.
:type stream: file-like object
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.get_file_to_stream()` takes.
:type kwargs: object
"""
self.connection.get_file_to_stream(share_name, directory_name,
file_name, stream, **kwargs) | Download a file from Azure File Share.
:param stream: A filehandle to store the file to.
:type stream: file-like object
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.get_file_to_stream()` takes.
:type kwargs: object | Below is the the instruction that describes the task:
### Input:
Download a file from Azure File Share.
:param stream: A filehandle to store the file to.
:type stream: file-like object
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.get_file_to_stream()` takes.
:type kwargs: object
### Response:
def get_file_to_stream(self, stream, share_name, directory_name, file_name, **kwargs):
"""
Download a file from Azure File Share.
:param stream: A filehandle to store the file to.
:type stream: file-like object
:param share_name: Name of the share.
:type share_name: str
:param directory_name: Name of the directory.
:type directory_name: str
:param file_name: Name of the file.
:type file_name: str
:param kwargs: Optional keyword arguments that
`FileService.get_file_to_stream()` takes.
:type kwargs: object
"""
self.connection.get_file_to_stream(share_name, directory_name,
file_name, stream, **kwargs) |
def queryGitHubFromFile(self, filePath, gitvars={}, verbosity=0, **kwargs):
"""Submit a GitHub GraphQL query from a file.
Can only be used with GraphQL queries.
For REST queries, see the 'queryGitHub' method.
Args:
filePath (str): A relative or absolute path to a file containing
a GraphQL query.
File may use comments and multi-line formatting.
.. _GitHub GraphQL Explorer:
https://developer.github.com/v4/explorer/
gitvars (Optional[Dict]): All query variables.
Defaults to empty.
GraphQL Only.
verbosity (Optional[int]): Changes output verbosity levels.
If < 0, all extra printouts are suppressed.
If == 0, normal print statements are displayed.
If > 0, additional status print statements are displayed.
Defaults to 0.
**kwargs: Keyword arguments for the 'queryGitHub' method.
Returns:
Dict: A JSON style dictionary.
"""
gitquery = self._readGQL(filePath, verbose=(verbosity >= 0))
return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, **kwargs) | Submit a GitHub GraphQL query from a file.
Can only be used with GraphQL queries.
For REST queries, see the 'queryGitHub' method.
Args:
filePath (str): A relative or absolute path to a file containing
a GraphQL query.
File may use comments and multi-line formatting.
.. _GitHub GraphQL Explorer:
https://developer.github.com/v4/explorer/
gitvars (Optional[Dict]): All query variables.
Defaults to empty.
GraphQL Only.
verbosity (Optional[int]): Changes output verbosity levels.
If < 0, all extra printouts are suppressed.
If == 0, normal print statements are displayed.
If > 0, additional status print statements are displayed.
Defaults to 0.
**kwargs: Keyword arguments for the 'queryGitHub' method.
Returns:
Dict: A JSON style dictionary. | Below is the the instruction that describes the task:
### Input:
Submit a GitHub GraphQL query from a file.
Can only be used with GraphQL queries.
For REST queries, see the 'queryGitHub' method.
Args:
filePath (str): A relative or absolute path to a file containing
a GraphQL query.
File may use comments and multi-line formatting.
.. _GitHub GraphQL Explorer:
https://developer.github.com/v4/explorer/
gitvars (Optional[Dict]): All query variables.
Defaults to empty.
GraphQL Only.
verbosity (Optional[int]): Changes output verbosity levels.
If < 0, all extra printouts are suppressed.
If == 0, normal print statements are displayed.
If > 0, additional status print statements are displayed.
Defaults to 0.
**kwargs: Keyword arguments for the 'queryGitHub' method.
Returns:
Dict: A JSON style dictionary.
### Response:
def queryGitHubFromFile(self, filePath, gitvars={}, verbosity=0, **kwargs):
"""Submit a GitHub GraphQL query from a file.
Can only be used with GraphQL queries.
For REST queries, see the 'queryGitHub' method.
Args:
filePath (str): A relative or absolute path to a file containing
a GraphQL query.
File may use comments and multi-line formatting.
.. _GitHub GraphQL Explorer:
https://developer.github.com/v4/explorer/
gitvars (Optional[Dict]): All query variables.
Defaults to empty.
GraphQL Only.
verbosity (Optional[int]): Changes output verbosity levels.
If < 0, all extra printouts are suppressed.
If == 0, normal print statements are displayed.
If > 0, additional status print statements are displayed.
Defaults to 0.
**kwargs: Keyword arguments for the 'queryGitHub' method.
Returns:
Dict: A JSON style dictionary.
"""
gitquery = self._readGQL(filePath, verbose=(verbosity >= 0))
return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, **kwargs) |
def plot(self, columns=None, loc=None, iloc=None, **kwargs):
""""
A wrapper around plotting. Matplotlib plot arguments can be passed in, plus:
Parameters
-----------
columns: string or list-like, optional
If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all.
loc:
iloc: slice, optional
specify a location-based subsection of the curves to plot, ex:
``.plot(iloc=slice(0,10))`` will plot the first 10 time points.
"""
from matplotlib import pyplot as plt
assert loc is None or iloc is None, "Cannot set both loc and iloc in call to .plot"
def shaded_plot(ax, x, y, y_upper, y_lower, **kwargs):
base_line, = ax.plot(x, y, drawstyle="steps-post", **kwargs)
ax.fill_between(x, y_lower, y2=y_upper, alpha=0.25, color=base_line.get_color(), linewidth=1.0, step="post")
def create_df_slicer(loc, iloc):
get_method = "loc" if loc is not None else "iloc"
if iloc is None and loc is None:
user_submitted_ix = slice(0, None)
else:
user_submitted_ix = loc if loc is not None else iloc
return lambda df: getattr(df, get_method)[user_submitted_ix]
subset_df = create_df_slicer(loc, iloc)
if not columns:
columns = self.cumulative_hazards_.columns
else:
columns = _to_list(columns)
set_kwargs_ax(kwargs)
ax = kwargs.pop("ax")
x = subset_df(self.cumulative_hazards_).index.values.astype(float)
for column in columns:
y = subset_df(self.cumulative_hazards_[column]).values
index = subset_df(self.cumulative_hazards_[column]).index
y_upper = subset_df(self.confidence_intervals_[column].loc["upper-bound"]).values
y_lower = subset_df(self.confidence_intervals_[column].loc["lower-bound"]).values
shaded_plot(ax, x, y, y_upper, y_lower, label=column, **kwargs)
plt.hlines(0, index.min() - 1, index.max(), color="k", linestyles="--", alpha=0.5)
ax.legend()
return ax | A wrapper around plotting. Matplotlib plot arguments can be passed in, plus:
Parameters
-----------
columns: string or list-like, optional
If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all.
loc:
iloc: slice, optional
specify a location-based subsection of the curves to plot, ex:
``.plot(iloc=slice(0,10))`` will plot the first 10 time points. | Below is the the instruction that describes the task:
### Input:
A wrapper around plotting. Matplotlib plot arguments can be passed in, plus:
Parameters
-----------
columns: string or list-like, optional
If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all.
loc:
iloc: slice, optional
specify a location-based subsection of the curves to plot, ex:
``.plot(iloc=slice(0,10))`` will plot the first 10 time points.
### Response:
def plot(self, columns=None, loc=None, iloc=None, **kwargs):
    """
    A wrapper around plotting. Matplotlib plot arguments can be passed in, plus:

    Parameters
    -----------
    columns: string or list-like, optional
        If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all.
    loc:
    iloc: slice, optional
        specify a location-based subsection of the curves to plot, ex:
        ``.plot(iloc=slice(0,10))`` will plot the first 10 time points.

    Returns
    -------
    ax : matplotlib axes
        The axes the curves were drawn on.
    """
    from matplotlib import pyplot as plt

    # loc and iloc are mutually exclusive ways of selecting which rows to draw.
    assert loc is None or iloc is None, "Cannot set both loc and iloc in call to .plot"

    def shaded_plot(ax, x, y, y_upper, y_lower, **kwargs):
        # Step curve for the point estimate plus a translucent confidence
        # band drawn in the same color.
        base_line, = ax.plot(x, y, drawstyle="steps-post", **kwargs)
        ax.fill_between(x, y_lower, y2=y_upper, alpha=0.25, color=base_line.get_color(), linewidth=1.0, step="post")

    def create_df_slicer(loc, iloc):
        # Build a callable that applies the user's loc/iloc selection to any
        # DataFrame/Series; when neither was given, select everything.
        get_method = "loc" if loc is not None else "iloc"
        if iloc is None and loc is None:
            user_submitted_ix = slice(0, None)
        else:
            user_submitted_ix = loc if loc is not None else iloc
        return lambda df: getattr(df, get_method)[user_submitted_ix]

    subset_df = create_df_slicer(loc, iloc)

    if not columns:
        columns = self.cumulative_hazards_.columns
    else:
        columns = _to_list(columns)

    set_kwargs_ax(kwargs)
    ax = kwargs.pop("ax")

    x = subset_df(self.cumulative_hazards_).index.values.astype(float)

    for column in columns:
        # One shaded curve per covariate: point estimate plus CI bounds.
        y = subset_df(self.cumulative_hazards_[column]).values
        index = subset_df(self.cumulative_hazards_[column]).index
        y_upper = subset_df(self.confidence_intervals_[column].loc["upper-bound"]).values
        y_lower = subset_df(self.confidence_intervals_[column].loc["lower-bound"]).values
        shaded_plot(ax, x, y, y_upper, y_lower, label=column, **kwargs)

    # Dashed reference line at zero hazard.
    # NOTE(review): `index` comes from the last loop iteration, so this
    # assumes at least one column was plotted — confirm against callers.
    plt.hlines(0, index.min() - 1, index.max(), color="k", linestyles="--", alpha=0.5)
    ax.legend()
    return ax
def runs_once(meth):
"""
A wrapper around Fabric's runs_once() to support our dryrun feature.
"""
from burlap.common import get_dryrun, runs_once_methods
if get_dryrun():
pass
else:
runs_once_methods.append(meth)
_runs_once(meth)
return meth | A wrapper around Fabric's runs_once() to support our dryrun feature. | Below is the instruction that describes the task:
### Input:
A wrapper around Fabric's runs_once() to support our dryrun feature.
### Response:
def runs_once(meth):
    """
    A wrapper around Fabric's runs_once() to support our dryrun feature.

    In dryrun mode the method is returned untouched so it stays callable
    any number of times; otherwise it is recorded in ``runs_once_methods``
    and passed through Fabric's ``_runs_once``.

    NOTE(review): the return value of ``_runs_once`` is discarded, matching
    the original behavior — confirm the decorator's side effects are what
    callers rely on.
    """
    from burlap.common import get_dryrun, runs_once_methods
    # Original code used an empty `pass` branch for the dryrun case;
    # inverting the condition removes the dead branch without changing
    # behavior.
    if not get_dryrun():
        runs_once_methods.append(meth)
        _runs_once(meth)
    return meth
def associate_failure_node(self, parent, child=None, **kwargs):
"""Add a node to run on failure.
=====API DOCS=====
Add a node to run on failure.
:param parent: Primary key of parent node to associate failure node to.
:type parent: int
:param child: Primary key of child node to be associated.
:type child: int
:param `**kwargs`: Fields used to create child node if ``child`` is not provided.
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict
=====API DOCS=====
"""
return self._assoc_or_create('failure', parent, child, **kwargs) | Add a node to run on failure.
=====API DOCS=====
Add a node to run on failure.
:param parent: Primary key of parent node to associate failure node to.
:type parent: int
:param child: Primary key of child node to be associated.
:type child: int
:param `**kwargs`: Fields used to create child node if ``child`` is not provided.
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict
=====API DOCS===== | Below is the the instruction that describes the task:
### Input:
Add a node to run on failure.
=====API DOCS=====
Add a node to run on failure.
:param parent: Primary key of parent node to associate failure node to.
:type parent: int
:param child: Primary key of child node to be associated.
:type child: int
:param `**kwargs`: Fields used to create child node if ``child`` is not provided.
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict
=====API DOCS=====
### Response:
def associate_failure_node(self, parent, child=None, **kwargs):
    """Attach a node that runs when ``parent`` fails.

    =====API DOCS=====
    Add a node to run on failure.

    :param parent: Primary key of parent node to associate failure node to.
    :type parent: int
    :param child: Primary key of child node to be associated.
    :type child: int
    :param `**kwargs`: Fields used to create child node if ``child`` is not provided.
    :returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
    :rtype: dict

    =====API DOCS=====
    """
    relationship = 'failure'
    return self._assoc_or_create(relationship, parent, child, **kwargs)
def recipe_weinreb17(adata, log=True, mean_threshold=0.01, cv_threshold=2,
n_pcs=50, svd_solver='randomized', random_state=0,
copy=False):
"""Normalization and filtering as of [Weinreb17]_.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
copy : bool (default: False)
Return a copy if true.
"""
from scipy.sparse import issparse
if issparse(adata.X):
raise ValueError('`recipe_weinreb16 does not support sparse matrices.')
if copy: adata = adata.copy()
if log: pp.log1p(adata)
adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,
max_fraction=0.05,
mult_with_mean=True)
gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)
adata._inplace_subset_var(gene_subset) # this modifies the object itself
X_pca = pp.pca(pp.zscore_deprecated(adata.X),
n_comps=n_pcs, svd_solver=svd_solver, random_state=random_state)
# update adata
adata.obsm['X_pca'] = X_pca
return adata if copy else None | Normalization and filtering as of [Weinreb17]_.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
copy : bool (default: False)
Return a copy if true. | Below is the the instruction that describes the task:
### Input:
Normalization and filtering as of [Weinreb17]_.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
copy : bool (default: False)
Return a copy if true.
### Response:
def recipe_weinreb17(adata, log=True, mean_threshold=0.01, cv_threshold=2,
                     n_pcs=50, svd_solver='randomized', random_state=0,
                     copy=False):
    """Normalization and filtering as of [Weinreb17]_.

    Expects non-logarithmized data. If using logarithmized data, pass `log=False`.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    log : bool (default: True)
        If True, log1p-transform the data first.
    mean_threshold : float (default: 0.01)
        Mean-expression cutoff passed to the gene CV filter.
    cv_threshold : float (default: 2)
        Coefficient-of-variation cutoff passed to the gene CV filter.
    n_pcs : int (default: 50)
        Number of principal components to compute.
    svd_solver : str (default: 'randomized')
        SVD solver forwarded to the PCA routine.
    random_state : int (default: 0)
        Seed forwarded to the PCA routine.
    copy : bool (default: False)
        Return a copy if true.
    """
    from scipy.sparse import issparse
    if issparse(adata.X):
        # NOTE(review): message says "weinreb16" although this function is
        # recipe_weinreb17 — left unchanged because it is a runtime string.
        raise ValueError('`recipe_weinreb16 does not support sparse matrices.')
    if copy: adata = adata.copy()
    if log: pp.log1p(adata)
    # Per-cell normalization that down-weights very highly expressed genes
    # (those above max_fraction of a cell's counts).
    adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,
                                                         max_fraction=0.05,
                                                         mult_with_mean=True)
    # Keep only genes passing the mean/CV dispersion filter.
    gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)
    adata._inplace_subset_var(gene_subset)  # this modifies the object itself
    # PCA on z-scored expression values.
    X_pca = pp.pca(pp.zscore_deprecated(adata.X),
                   n_comps=n_pcs, svd_solver=svd_solver, random_state=random_state)
    # update adata
    adata.obsm['X_pca'] = X_pca
    return adata if copy else None
def extend(self, data, size):
"""
Append user-supplied data to chunk, return resulting chunk size. If the
data would exceeded the available space, the chunk grows in size.
"""
return lib.zchunk_extend(self._as_parameter_, data, size) | Append user-supplied data to chunk, return resulting chunk size. If the
data would exceeded the available space, the chunk grows in size. | Below is the the instruction that describes the task:
### Input:
Append user-supplied data to chunk, return resulting chunk size. If the
data would exceeded the available space, the chunk grows in size.
### Response:
def extend(self, data, size):
    """
    Append user-supplied data to chunk, return resulting chunk size. If the
    data would exceed the available space, the chunk grows in size.

    :param data: bytes-like buffer to append.
    :param size: number of bytes of ``data`` to append.
    :returns: the resulting chunk size reported by the native call.
    """
    # Thin wrapper: delegates to the native zchunk_extend, passing this
    # object's handle via the ctypes _as_parameter_ convention.
    return lib.zchunk_extend(self._as_parameter_, data, size)
def _set_traffic_class(self, v, load=False):
"""
Setter method for traffic_class, mapped from YANG variable /interface/port_channel/qos/random_detect/traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_class() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """traffic_class must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__traffic_class = t
if hasattr(self, '_set'):
self._set() | Setter method for traffic_class, mapped from YANG variable /interface/port_channel/qos/random_detect/traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_class() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for traffic_class, mapped from YANG variable /interface/port_channel/qos/random_detect/traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_class() directly.
### Response:
def _set_traffic_class(self, v, load=False):
    """
    Setter method for traffic_class, mapped from YANG variable /interface/port_channel/qos/random_detect/traffic_class (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_traffic_class is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_traffic_class() directly.

    :param v: value to assign; must be coercible to the generated YANG list type.
    :param load: not used in this body; presumably indicates a datastore load
        rather than a user set — TODO confirm against the binding framework.
    :raises ValueError: if ``v`` cannot be coerced to the expected list type.
    """
    # NOTE: auto-generated (pyangbind-style) binding code — the YANGDynClass
    # call mirrors the YANG schema and should not be edited by hand.
    if hasattr(v, "_utype"):
        # Unwrap an already-typed value to its base type before re-validating.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the structured error payload the framework expects.
        raise ValueError({
            'error-string': """traffic_class must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("red_tc_value red_dp_value",traffic_class.traffic_class, yang_name="traffic-class", rest_name="traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='red-tc-value red-dp-value', extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}), is_container='list', yang_name="traffic-class", rest_name="traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'traffic-class to configure RED on', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-no': None, u'cli-suppress-list-no': None, u'cli-run-template-enter': u'$(.?:)', u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
        })
    # Store the validated, wrapped value.
    self.__traffic_class = t
    # Notify the containing object, if it exposes a change hook.
    if hasattr(self, '_set'):
        self._set()
def all_host_infos():
'''
Summarize all host information.
'''
output = []
output.append(["Operating system", os()])
output.append(["CPUID information", cpu()])
output.append(["CC information", compiler()])
output.append(["JDK information", from_cmd("java -version")])
output.append(["MPI information", from_cmd("mpirun -version")])
output.append(["Scala information", from_cmd("scala -version")])
output.append(["OpenCL headers", from_cmd(
"find /usr/include|grep opencl.h")])
output.append(["OpenCL libraries", from_cmd(
"find /usr/lib/ -iname '*opencl*'")])
output.append(["NVidia SMI", from_cmd("nvidia-smi -q")])
output.append(["OpenCL Details", opencl()])
return output | Summarize all host information. | Below is the the instruction that describes the task:
### Input:
Summarize all host information.
### Response:
def all_host_infos():
    '''
    Summarize all host information.

    Returns a list of ``[label, value]`` pairs, one per probe.
    '''
    return [
        ["Operating system", os()],
        ["CPUID information", cpu()],
        ["CC information", compiler()],
        ["JDK information", from_cmd("java -version")],
        ["MPI information", from_cmd("mpirun -version")],
        ["Scala information", from_cmd("scala -version")],
        ["OpenCL headers", from_cmd("find /usr/include|grep opencl.h")],
        ["OpenCL libraries", from_cmd("find /usr/lib/ -iname '*opencl*'")],
        ["NVidia SMI", from_cmd("nvidia-smi -q")],
        ["OpenCL Details", opencl()],
    ]
def component_title(component):
"""
Label, title and caption
Title is the label text plus the title text
Title may contain italic tag, etc.
"""
title = u''
label_text = u''
title_text = u''
if component.get('label'):
label_text = component.get('label')
if component.get('title'):
title_text = component.get('title')
title = unicode_value(label_text)
if label_text != '' and title_text != '':
title += ' '
title += unicode_value(title_text)
if component.get('type') == 'abstract' and title == '':
title = 'Abstract'
return title | Label, title and caption
Title is the label text plus the title text
Title may contain italic tag, etc. | Below is the the instruction that describes the task:
### Input:
Label, title and caption
Title is the label text plus the title text
Title may contain italic tag, etc.
### Response:
def component_title(component):
    """
    Build a component's display title from its label and title text.

    The result is the label text followed by the title text (separated by a
    space when both are present); the title may contain markup such as
    italic tags.  An abstract component with no other title is titled
    "Abstract".
    """
    label_text = component.get('label') or u''
    title_text = component.get('title') or u''
    title = unicode_value(label_text)
    if label_text != '' and title_text != '':
        title += ' '
    title += unicode_value(title_text)
    if component.get('type') == 'abstract' and title == '':
        title = 'Abstract'
    return title
def lowpass(var, key, factor):
'''a simple lowpass filter'''
global lowpass_data
if not key in lowpass_data:
lowpass_data[key] = var
else:
lowpass_data[key] = factor*lowpass_data[key] + (1.0 - factor)*var
return lowpass_data[key] | a simple lowpass filter | Below is the instruction that describes the task:
### Input:
a simple lowpass filter
### Response:
def lowpass(var, key, factor):
    '''A simple lowpass filter.

    Exponentially smooths ``var`` into the module-level ``lowpass_data``
    cache under ``key``:  ``out = factor*previous + (1-factor)*var``.
    The first sample seen for a key is passed through unchanged.

    :param var: new raw sample.
    :param key: cache key identifying the signal being filtered.
    :param factor: smoothing factor; values closer to 1 weight history
        more heavily (per the formula above).
    :returns: the filtered value, which is also stored back in the cache.
    '''
    global lowpass_data
    # `key not in` replaces the non-idiomatic `not key in` of the original.
    if key not in lowpass_data:
        # Seed the filter with the first observed sample.
        lowpass_data[key] = var
    else:
        lowpass_data[key] = factor * lowpass_data[key] + (1.0 - factor) * var
    return lowpass_data[key]
def is_compatible_with(self, spec_or_tensor):
"""Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self.
"""
return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and
self._shape.is_compatible_with(spec_or_tensor.shape)) | Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self. | Below is the the instruction that describes the task:
### Input:
Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self.
### Response:
def is_compatible_with(self, spec_or_tensor):
    """Returns True if spec_or_tensor is compatible with this TensorSpec.

    Compatibility requires both a compatible dtype and a compatible shape
    (see `tf.TensorShape.is_compatible_with`); the dtype check is performed
    first and short-circuits the shape check.

    Args:
      spec_or_tensor: A tf.TensorSpec or a tf.Tensor

    Returns:
      True if spec_or_tensor is compatible with self.
    """
    if not self._dtype.is_compatible_with(spec_or_tensor.dtype):
        return False
    return self._shape.is_compatible_with(spec_or_tensor.shape)
def get_urlclass_from (scheme, assume_local_file=False):
"""Return checker class for given URL scheme. If the scheme
cannot be matched and assume_local_file is True, assume a local file.
"""
if scheme in ("http", "https"):
klass = httpurl.HttpUrl
elif scheme == "ftp":
klass = ftpurl.FtpUrl
elif scheme == "file":
klass = fileurl.FileUrl
elif scheme == "telnet":
klass = telneturl.TelnetUrl
elif scheme == "mailto":
klass = mailtourl.MailtoUrl
elif scheme in ("nntp", "news", "snews"):
klass = nntpurl.NntpUrl
elif scheme == "dns":
klass = dnsurl.DnsUrl
elif scheme == "itms-services":
klass = itmsservicesurl.ItmsServicesUrl
elif scheme and unknownurl.is_unknown_scheme(scheme):
klass = unknownurl.UnknownUrl
elif assume_local_file:
klass = fileurl.FileUrl
else:
klass = unknownurl.UnknownUrl
return klass | Return checker class for given URL scheme. If the scheme
cannot be matched and assume_local_file is True, assume a local file. | Below is the the instruction that describes the task:
### Input:
Return checker class for given URL scheme. If the scheme
cannot be matched and assume_local_file is True, assume a local file.
### Response:
def get_urlclass_from (scheme, assume_local_file=False):
    """Return checker class for given URL scheme. If the scheme
    cannot be matched and assume_local_file is True, assume a local file.
    """
    # Direct scheme -> checker-class dispatch for the known protocols.
    dispatch = {
        "http": httpurl.HttpUrl,
        "https": httpurl.HttpUrl,
        "ftp": ftpurl.FtpUrl,
        "file": fileurl.FileUrl,
        "telnet": telneturl.TelnetUrl,
        "mailto": mailtourl.MailtoUrl,
        "nntp": nntpurl.NntpUrl,
        "news": nntpurl.NntpUrl,
        "snews": nntpurl.NntpUrl,
        "dns": dnsurl.DnsUrl,
        "itms-services": itmsservicesurl.ItmsServicesUrl,
    }
    if scheme in dispatch:
        return dispatch[scheme]
    # Non-empty schemes outside the dispatch table may still be recognized
    # as deliberately-unknown schemes.
    if scheme and unknownurl.is_unknown_scheme(scheme):
        return unknownurl.UnknownUrl
    if assume_local_file:
        return fileurl.FileUrl
    return unknownurl.UnknownUrl
def list_tags(cwd,
user=None,
password=None,
ignore_retcode=False,
output_encoding=None):
'''
.. versionadded:: 2015.8.0
Return a list of tags
cwd
The path to the git checkout
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
CLI Examples:
.. code-block:: bash
salt myminion git.list_tags /path/to/repo
'''
cwd = _expand_path(cwd, user)
command = ['git', 'for-each-ref', '--format', '%(refname:short)',
'refs/tags/']
return _git_run(command,
cwd=cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding)['stdout'].splitlines() | .. versionadded:: 2015.8.0
Return a list of tags
cwd
The path to the git checkout
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
CLI Examples:
.. code-block:: bash
salt myminion git.list_tags /path/to/repo | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
Return a list of tags
cwd
The path to the git checkout
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
CLI Examples:
.. code-block:: bash
salt myminion git.list_tags /path/to/repo
### Response:
def list_tags(cwd,
              user=None,
              password=None,
              ignore_retcode=False,
              output_encoding=None):
    '''
    .. versionadded:: 2015.8.0

    Return a list of tags

    cwd
        The path to the git checkout

    user
        User under which to run the git command. By default, the command is run
        by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    ignore_retcode : False
        If ``True``, do not log an error to the minion log if the git command
        returns a nonzero exit status.

        .. versionadded:: 2015.8.0

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    CLI Examples:

    .. code-block:: bash

        salt myminion git.list_tags /path/to/repo
    '''
    cwd = _expand_path(cwd, user)
    # Short tag names only, one per line.
    cmd = ['git', 'for-each-ref', '--format', '%(refname:short)', 'refs/tags/']
    result = _git_run(cmd,
                      cwd=cwd,
                      user=user,
                      password=password,
                      ignore_retcode=ignore_retcode,
                      output_encoding=output_encoding)
    return result['stdout'].splitlines()
def angle(self, vertices):
"""
If Text is 2D, get the rotation angle in radians.
Parameters
-----------
vertices : (n, 2) float
Vertices in space referenced by self.points
Returns
---------
angle : float
Rotation angle in radians
"""
if vertices.shape[1] != 2:
raise ValueError('angle only valid for 2D points!')
# get the vector from origin
direction = vertices[self.vector] - vertices[self.origin]
# get the rotation angle in radians
angle = np.arctan2(*direction[::-1])
return angle | If Text is 2D, get the rotation angle in radians.
Parameters
-----------
vertices : (n, 2) float
Vertices in space referenced by self.points
Returns
---------
angle : float
Rotation angle in radians | Below is the the instruction that describes the task:
### Input:
If Text is 2D, get the rotation angle in radians.
Parameters
-----------
vertices : (n, 2) float
Vertices in space referenced by self.points
Returns
---------
angle : float
Rotation angle in radians
### Response:
def angle(self, vertices):
    """
    If Text is 2D, get the rotation angle in radians.

    Parameters
    -----------
    vertices : (n, 2) float
        Vertices in space referenced by self.points

    Returns
    ---------
    angle : float
        Rotation angle in radians
    """
    if vertices.shape[1] != 2:
        raise ValueError('angle only valid for 2D points!')
    # Direction from the text origin toward its orientation vertex.
    dx, dy = vertices[self.vector] - vertices[self.origin]
    # Counter-clockwise angle of that direction measured from the +X axis.
    return np.arctan2(dy, dx)
def write_hdf5_series(series, output, path=None, attrs=None, **kwargs):
"""Write a Series to HDF5.
See :func:`write_hdf5_array` for details of arguments and keywords.
"""
if attrs is None:
attrs = format_index_array_attrs(series)
return write_hdf5_array(series, output, path=path, attrs=attrs, **kwargs) | Write a Series to HDF5.
See :func:`write_hdf5_array` for details of arguments and keywords. | Below is the the instruction that describes the task:
### Input:
Write a Series to HDF5.
See :func:`write_hdf5_array` for details of arguments and keywords.
### Response:
def write_hdf5_series(series, output, path=None, attrs=None, **kwargs):
    """Write a Series to HDF5.

    See :func:`write_hdf5_array` for details of arguments and keywords.
    """
    # Derive default attributes from the series index when none are given.
    effective_attrs = format_index_array_attrs(series) if attrs is None else attrs
    return write_hdf5_array(series, output, path=path, attrs=effective_attrs,
                            **kwargs)
def interrupt (aggregate):
"""Interrupt execution and shutdown, ignoring any subsequent
interrupts."""
while True:
try:
log.warn(LOG_CHECK,
_("interrupt; waiting for active threads to finish"))
log.warn(LOG_CHECK,
_("another interrupt will exit immediately"))
abort(aggregate)
break
except KeyboardInterrupt:
pass | Interrupt execution and shutdown, ignoring any subsequent
interrupts. | Below is the the instruction that describes the task:
### Input:
Interrupt execution and shutdown, ignoring any subsequent
interrupts.
### Response:
def interrupt (aggregate):
    """Interrupt execution and shutdown, ignoring any subsequent
    interrupts."""
    while True:
        try:
            log.warn(LOG_CHECK,
                     _("interrupt; waiting for active threads to finish"))
            log.warn(LOG_CHECK,
                     _("another interrupt will exit immediately"))
            abort(aggregate)
        except KeyboardInterrupt:
            # Another Ctrl-C arrived during shutdown: restart the abort
            # instead of propagating the interrupt.
            continue
        return
async def list_all_active_projects(self, page_size=1000):
"""Get all active projects.
You can find the endpoint documentation `here <https://cloud.
google.com/resource-manager/reference/rest/v1/projects/list>`__.
Args:
page_size (int): hint for the client to only retrieve up to
this number of results per API call.
Returns:
list(dicts): all active projects
"""
url = f'{self.BASE_URL}/{self.api_version}/projects'
params = {'pageSize': page_size}
responses = await self.list_all(url, params)
projects = self._parse_rsps_for_projects(responses)
return [
project for project in projects
if project.get('lifecycleState', '').lower() == 'active'
] | Get all active projects.
You can find the endpoint documentation `here <https://cloud.
google.com/resource-manager/reference/rest/v1/projects/list>`__.
Args:
page_size (int): hint for the client to only retrieve up to
this number of results per API call.
Returns:
list(dicts): all active projects | Below is the the instruction that describes the task:
### Input:
Get all active projects.
You can find the endpoint documentation `here <https://cloud.
google.com/resource-manager/reference/rest/v1/projects/list>`__.
Args:
page_size (int): hint for the client to only retrieve up to
this number of results per API call.
Returns:
list(dicts): all active projects
### Response:
async def list_all_active_projects(self, page_size=1000):
    """Get all active projects.

    You can find the endpoint documentation `here <https://cloud.
    google.com/resource-manager/reference/rest/v1/projects/list>`__.

    Args:
        page_size (int): hint for the client to only retrieve up to
            this number of results per API call.
    Returns:
        list(dicts): all active projects
    """
    url = f'{self.BASE_URL}/{self.api_version}/projects'
    responses = await self.list_all(url, {'pageSize': page_size})
    # Keep only projects whose lifecycle state is "active" (case-insensitive);
    # projects without a lifecycleState field are excluded.
    active = []
    for project in self._parse_rsps_for_projects(responses):
        if project.get('lifecycleState', '').lower() == 'active':
            active.append(project)
    return active
def to_fmt(self) -> fmt.indentable:
"""
Return an Fmt representation for pretty-printing
"""
lsb = []
if len(self._lsig) > 0:
for s in self._lsig:
lsb.append(s.to_fmt())
block = fmt.block("(", ")", fmt.sep(', ', lsb))
qual = "tuple"
txt = fmt.sep("", [qual, block])
return txt | Return an Fmt representation for pretty-printing | Below is the instruction that describes the task:
### Input:
Return an Fmt representation for pretty-printing
### Response:
def to_fmt(self) -> fmt.indentable:
    """Build a pretty-printable Fmt representation of this tuple signature."""
    members = [sig.to_fmt() for sig in self._lsig] if len(self._lsig) > 0 else []
    # Render as: tuple(<member>, <member>, ...)
    inner = fmt.block("(", ")", fmt.sep(', ', members))
    return fmt.sep("", ["tuple", inner])
def xml_entity_escape(data):
"""
replace special characters with their XML entity versions
"""
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
return data | replace special characters with their XML entity versions | Below is the instruction that describes the task:
### Input:
replace special characters with their XML entity versions
### Response:
def xml_entity_escape(data):
    """
    Replace the XML special characters ``&``, ``>`` and ``<`` with their
    entity references and return the escaped string.
    """
    # '&' must be escaped first, otherwise the '&' of the other entities
    # would be double-escaped.
    for char, entity in (("&", "&amp;"), (">", "&gt;"), ("<", "&lt;")):
        data = data.replace(char, entity)
    return data
def has_child(cls, child_type, query):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQuery().term(tag='something')
> query = ElasticQuery().has_Child('blog_tag', child_query)
'''
instance = cls(has_child={'type': child_type, 'query': query})
return instance | http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQuery().term(tag='something')
> query = ElasticQuery().has_Child('blog_tag', child_query) | Below is the the instruction that describes the task:
### Input:
http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQuery().term(tag='something')
> query = ElasticQuery().has_Child('blog_tag', child_query)
### Response:
def has_child(cls, child_type, query):
    '''
    Build a ``has_child`` query.

    http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
    The has_child query accepts a query and the child type to run against,
    and results in parent documents that have child docs matching the query.

    > child_query = ElasticQuery().term(tag='something')
    > query = ElasticQuery().has_child('blog_tag', child_query)
    '''
    body = {'type': child_type, 'query': query}
    return cls(has_child=body)
def log_subtract(loga, logb):
r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (a)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float
"""
return loga + np.log(1 - np.exp(logb - loga)) | r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (a)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float | Below is the the instruction that describes the task:
### Input:
r"""Numerically stable method for avoiding overflow errors when calculating
:math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (a)` and that
:math:`a > b`.
See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
for more details.
Parameters
----------
loga: float
logb: float
Must be less than loga.
Returns
-------
log(a - b): float
### Response:
def log_subtract(loga, logb):
    r"""Numerically stable method for avoiding overflow errors when calculating
    :math:`\log (a-b)`, given :math:`\log (a)`, :math:`\log (b)` and that
    :math:`a > b`.

    See https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
    for more details.

    Parameters
    ----------
    loga: float
    logb: float
        Must be less than loga.

    Returns
    -------
    log(a - b): float
    """
    # log(a - b) = log(a) + log(1 - exp(logb - loga)).
    # np.log1p(-x) is more accurate than np.log(1 - x) when
    # exp(logb - loga) is tiny (i.e. b << a).
    return loga + np.log1p(-np.exp(logb - loga))
def update_x(self, x, indices=None):
"""
Update partial or entire x.
Args:
x (numpy.ndarray or list): to-be-updated x
indices (numpy.ndarray or list or optional): to-be-updated qubit indices
Returns:
Pauli: self
Raises:
QiskitError: when updating whole x, the number of qubits must be the same.
"""
x = _make_np_bool(x)
if indices is None:
if len(self._x) != len(x):
raise QiskitError("During updating whole x, you can not change "
"the number of qubits.")
self._x = x
else:
if not isinstance(indices, list) and not isinstance(indices, np.ndarray):
indices = [indices]
for p, idx in enumerate(indices):
self._x[idx] = x[p]
return self | Update partial or entire x.
Args:
x (numpy.ndarray or list): to-be-updated x
indices (numpy.ndarray or list or optional): to-be-updated qubit indices
Returns:
Pauli: self
Raises:
QiskitError: when updating whole x, the number of qubits must be the same. | Below is the the instruction that describes the task:
### Input:
Update partial or entire x.
Args:
x (numpy.ndarray or list): to-be-updated x
indices (numpy.ndarray or list or optional): to-be-updated qubit indices
Returns:
Pauli: self
Raises:
QiskitError: when updating whole x, the number of qubits must be the same.
### Response:
def update_x(self, x, indices=None):
    """
    Update partial or entire x.

    Args:
        x (numpy.ndarray or list): to-be-updated x
        indices (numpy.ndarray or list or optional): to-be-updated qubit indices

    Returns:
        Pauli: self

    Raises:
        QiskitError: when updating whole x, the number of qubits must be the same.
    """
    new_x = _make_np_bool(x)
    if indices is None:
        # Whole-vector replacement: the register size must not change.
        if len(new_x) != len(self._x):
            raise QiskitError("During updating whole x, you can not change "
                              "the number of qubits.")
        self._x = new_x
    else:
        # Accept a single index as well as a sequence of indices.
        if not isinstance(indices, (list, np.ndarray)):
            indices = [indices]
        for pos, qubit in enumerate(indices):
            self._x[qubit] = new_x[pos]
    return self
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = super().get_context_data(**kwargs)
context['poster'] = self.poster
return context | Returns the context data to provide to the template. | Below is the the instruction that describes the task:
### Input:
Returns the context data to provide to the template.
### Response:
def get_context_data(self, **kwargs):
    """Return the template context, augmented with the considered poster."""
    context = super().get_context_data(**kwargs)
    context.update(poster=self.poster)
    return context
def packets(self):
"""
:return: dictionary {id: object} of all packets.
:rtype: dict of (int, xenamanager.xena_port.XenaCapturePacket)
"""
if not self.get_object_by_type('cappacket'):
for index in range(0, self.read_stats()['packets']):
XenaCapturePacket(parent=self, index='{}/{}'.format(self.index, index))
return {p.id: p for p in self.get_objects_by_type('cappacket')} | :return: dictionary {id: object} of all packets.
:rtype: dict of (int, xenamanager.xena_port.XenaCapturePacket) | Below is the the instruction that describes the task:
### Input:
:return: dictionary {id: object} of all packets.
:rtype: dict of (int, xenamanager.xena_port.XenaCapturePacket)
### Response:
def packets(self):
    """
    :return: dictionary {id: object} of all packets.
    :rtype: dict of (int, xenamanager.xena_port.XenaCapturePacket)
    """
    # Lazily materialize the packet objects on first access.
    if not self.get_object_by_type('cappacket'):
        total = self.read_stats()['packets']
        for pkt_index in range(total):
            XenaCapturePacket(parent=self, index='{}/{}'.format(self.index, pkt_index))
    return {pkt.id: pkt for pkt in self.get_objects_by_type('cappacket')}
def join_multicast(self, universe: int) -> None:
"""
Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature!
"""
self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress)) | Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature! | Below is the the instruction that describes the task:
### Input:
Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature!
### Response:
def join_multicast(self, universe: int) -> None:
    """
    Join the multicast group used for the given universe.

    Note: on Windows a bind IP-Address must have been given for this feature
    to function properly; on any other OS you are not allowed to set a bind
    address.

    :param universe: the universe whose multicast group should be joined.
        The network hardware has to support the multicast feature!
    """
    group = socket.inet_aton(calculate_multicast_addr(universe))
    interface = socket.inet_aton(self._bindAddress)
    # IP_ADD_MEMBERSHIP expects the packed group address followed by the
    # packed local interface address.
    self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
                         group + interface)
def _get_utc_sun_time_deg(self, deg):
"""
Return the times in minutes from 00:00 (utc) for a given sun altitude.
This is done for a given sun altitude in sunrise `deg` degrees
This function only works for altitudes sun really is.
If the sun never gets to this altitude, the returned sunset and sunrise
values will be negative. This can happen in low altitude when latitude
is nearing the poles in winter times, the sun never goes very high in
the sky there.
Algorithm from
http://www.srrb.noaa.gov/highlights/sunrise/calcdetails.html
The low accuracy solar position equations are used.
These routines are based on Jean Meeus's book Astronomical Algorithms.
"""
gama = 0 # location of sun in yearly cycle in radians
eqtime = 0 # difference betwen sun noon and clock noon
decl = 0 # sun declanation
hour_angle = 0 # solar hour angle
sunrise_angle = math.pi * deg / 180.0 # sun angle at sunrise/set
# get the day of year
day_of_year = self.gday_of_year()
# get radians of sun orbit around earth =)
gama = 2.0 * math.pi * ((day_of_year - 1) / 365.0)
# get the diff betwen suns clock and wall clock in minutes
eqtime = 229.18 * (0.000075 + 0.001868 * math.cos(gama) -
0.032077 * math.sin(gama) -
0.014615 * math.cos(2.0 * gama) -
0.040849 * math.sin(2.0 * gama))
# calculate suns declanation at the equater in radians
decl = (0.006918 - 0.399912 * math.cos(gama) +
0.070257 * math.sin(gama) -
0.006758 * math.cos(2.0 * gama) +
0.000907 * math.sin(2.0 * gama) -
0.002697 * math.cos(3.0 * gama) +
0.00148 * math.sin(3.0 * gama))
# we use radians, ratio is 2pi/360
latitude = math.pi * self.location.latitude / 180.0
# the sun real time diff from noon at sunset/rise in radians
try:
hour_angle = (math.acos(
math.cos(sunrise_angle) /
(math.cos(latitude) * math.cos(decl)) -
math.tan(latitude) * math.tan(decl)))
# check for too high altitudes and return negative values
except ValueError:
return -720, -720
# we use minutes, ratio is 1440min/2pi
hour_angle = 720.0 * hour_angle / math.pi
# get sunset/rise times in utc wall clock in minutes from 00:00 time
# sunrise / sunset
longitude = self.location.longitude
return int(720.0 - 4.0 * longitude - hour_angle - eqtime), \
int(720.0 - 4.0 * longitude + hour_angle - eqtime) | Return the times in minutes from 00:00 (utc) for a given sun altitude.
This is done for a given sun altitude in sunrise `deg` degrees
This function only works for altitudes sun really is.
If the sun never gets to this altitude, the returned sunset and sunrise
values will be negative. This can happen in low altitude when latitude
is nearing the poles in winter times, the sun never goes very high in
the sky there.
Algorithm from
http://www.srrb.noaa.gov/highlights/sunrise/calcdetails.html
The low accuracy solar position equations are used.
These routines are based on Jean Meeus's book Astronomical Algorithms. | Below is the the instruction that describes the task:
### Input:
Return the times in minutes from 00:00 (utc) for a given sun altitude.
This is done for a given sun altitude in sunrise `deg` degrees
This function only works for altitudes sun really is.
If the sun never gets to this altitude, the returned sunset and sunrise
values will be negative. This can happen in low altitude when latitude
is nearing the poles in winter times, the sun never goes very high in
the sky there.
Algorithm from
http://www.srrb.noaa.gov/highlights/sunrise/calcdetails.html
The low accuracy solar position equations are used.
These routines are based on Jean Meeus's book Astronomical Algorithms.
### Response:
def _get_utc_sun_time_deg(self, deg):
    """
    Return the times in minutes from 00:00 (utc) for a given sun altitude.
    This is done for a given sun altitude in sunrise `deg` degrees
    This function only works for altitudes sun really is.
    If the sun never gets to this altitude, the returned sunset and sunrise
    values will be negative. This can happen in low altitude when latitude
    is nearing the poles in winter times, the sun never goes very high in
    the sky there.
    Algorithm from
    http://www.srrb.noaa.gov/highlights/sunrise/calcdetails.html
    The low accuracy solar position equations are used.
    These routines are based on Jean Meeus's book Astronomical Algorithms.

    :param deg: sun altitude (degrees) for which to solve.
    :return: (sunrise, sunset) tuple in minutes from 00:00 UTC;
        (-720, -720) when the sun never reaches the given altitude.
    """
    gama = 0  # location of sun in yearly cycle in radians
    eqtime = 0  # difference betwen sun noon and clock noon
    decl = 0  # sun declanation
    hour_angle = 0  # solar hour angle
    sunrise_angle = math.pi * deg / 180.0  # sun angle at sunrise/set
    # get the day of year
    day_of_year = self.gday_of_year()
    # get radians of sun orbit around earth =)
    gama = 2.0 * math.pi * ((day_of_year - 1) / 365.0)
    # get the diff betwen suns clock and wall clock in minutes
    # (the "equation of time", Fourier approximation)
    eqtime = 229.18 * (0.000075 + 0.001868 * math.cos(gama) -
                       0.032077 * math.sin(gama) -
                       0.014615 * math.cos(2.0 * gama) -
                       0.040849 * math.sin(2.0 * gama))
    # calculate suns declanation at the equater in radians
    decl = (0.006918 - 0.399912 * math.cos(gama) +
            0.070257 * math.sin(gama) -
            0.006758 * math.cos(2.0 * gama) +
            0.000907 * math.sin(2.0 * gama) -
            0.002697 * math.cos(3.0 * gama) +
            0.00148 * math.sin(3.0 * gama))
    # we use radians, ratio is 2pi/360
    latitude = math.pi * self.location.latitude / 180.0
    # the sun real time diff from noon at sunset/rise in radians
    try:
        hour_angle = (math.acos(
            math.cos(sunrise_angle) /
            (math.cos(latitude) * math.cos(decl)) -
            math.tan(latitude) * math.tan(decl)))
    # check for too high altitudes and return negative values
    # (acos argument out of [-1, 1] raises ValueError)
    except ValueError:
        return -720, -720
    # we use minutes, ratio is 1440min/2pi
    hour_angle = 720.0 * hour_angle / math.pi
    # get sunset/rise times in utc wall clock in minutes from 00:00 time
    # sunrise / sunset
    longitude = self.location.longitude
    return int(720.0 - 4.0 * longitude - hour_angle - eqtime), \
        int(720.0 - 4.0 * longitude + hour_angle - eqtime)
def _elem_set_attrs(obj, parent, to_str):
"""
:param obj: Container instance gives attributes of XML Element
:param parent: XML ElementTree parent node object
:param to_str: Callable to convert value to string or None
:param options: Keyword options, see :func:`container_to_etree`
:return: None but parent will be modified
"""
for attr, val in anyconfig.compat.iteritems(obj):
parent.set(attr, to_str(val)) | :param obj: Container instance gives attributes of XML Element
:param parent: XML ElementTree parent node object
:param to_str: Callable to convert value to string or None
:param options: Keyword options, see :func:`container_to_etree`
:return: None but parent will be modified | Below is the the instruction that describes the task:
### Input:
:param obj: Container instance gives attributes of XML Element
:param parent: XML ElementTree parent node object
:param to_str: Callable to convert value to string or None
:param options: Keyword options, see :func:`container_to_etree`
:return: None but parent will be modified
### Response:
def _elem_set_attrs(obj, parent, to_str):
    """
    Copy every (attribute, value) pair held by ``obj`` onto the XML element.

    :param obj: Container instance gives attributes of XML Element
    :param parent: XML ElementTree parent node object
    :param to_str: Callable to convert value to string or None
    :return: None but parent will be modified
    """
    for name, value in anyconfig.compat.iteritems(obj):
        parent.set(name, to_str(value))
def _generate_dockerfile(base_image, layers):
"""
Generate the Dockerfile contents
A generated Dockerfile will look like the following:
```
FROM lambci/lambda:python3.6
ADD --chown=sbx_user1051:495 layer1 /opt
ADD --chown=sbx_user1051:495 layer2 /opt
```
Parameters
----------
base_image str
Base Image to use for the new image
layers list(samcli.commands.local.lib.provider.Layer)
List of Layers to be use to mount in the image
Returns
-------
str
String representing the Dockerfile contents for the image
"""
dockerfile_content = "FROM {}\n".format(base_image)
for layer in layers:
dockerfile_content = dockerfile_content + \
"ADD --chown=sbx_user1051:495 {} {}\n".format(layer.name, LambdaImage._LAYERS_DIR)
return dockerfile_content | Generate the Dockerfile contents
A generated Dockerfile will look like the following:
```
FROM lambci/lambda:python3.6
ADD --chown=sbx_user1051:495 layer1 /opt
ADD --chown=sbx_user1051:495 layer2 /opt
```
Parameters
----------
base_image str
Base Image to use for the new image
layers list(samcli.commands.local.lib.provider.Layer)
List of Layers to be use to mount in the image
Returns
-------
str
String representing the Dockerfile contents for the image | Below is the the instruction that describes the task:
### Input:
Generate the Dockerfile contents
A generated Dockerfile will look like the following:
```
FROM lambci/lambda:python3.6
ADD --chown=sbx_user1051:495 layer1 /opt
ADD --chown=sbx_user1051:495 layer2 /opt
```
Parameters
----------
base_image str
Base Image to use for the new image
layers list(samcli.commands.local.lib.provider.Layer)
List of Layers to be use to mount in the image
Returns
-------
str
String representing the Dockerfile contents for the image
### Response:
def _generate_dockerfile(base_image, layers):
    """
    Generate the Dockerfile contents

    A generated Dockerfile will look like the following:

    ```
    FROM lambci/lambda:python3.6

    ADD --chown=sbx_user1051:495 layer1 /opt
    ADD --chown=sbx_user1051:495 layer2 /opt
    ```

    Parameters
    ----------
    base_image str
        Base Image to use for the new image
    layers list(samcli.commands.local.lib.provider.Layer)
        List of Layers to be use to mount in the image

    Returns
    -------
    str
        String representing the Dockerfile contents for the image
    """
    lines = ["FROM {}".format(base_image)]
    lines.extend(
        "ADD --chown=sbx_user1051:495 {} {}".format(layer.name, LambdaImage._LAYERS_DIR)
        for layer in layers
    )
    # Trailing newline matches the original line-by-line concatenation.
    return "\n".join(lines) + "\n"
def GetArchiveTypeIndicators(cls, path_spec, resolver_context=None):
"""Determines if a file contains a supported archive types.
Args:
path_spec (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built-in context which is not multi process safe.
Returns:
list[str]: supported format type indicators.
"""
if (cls._archive_remainder_list is None or
cls._archive_store is None):
specification_store, remainder_list = cls._GetSpecificationStore(
definitions.FORMAT_CATEGORY_ARCHIVE)
cls._archive_remainder_list = remainder_list
cls._archive_store = specification_store
if cls._archive_scanner is None:
cls._archive_scanner = cls._GetSignatureScanner(cls._archive_store)
return cls._GetTypeIndicators(
cls._archive_scanner, cls._archive_store,
cls._archive_remainder_list, path_spec,
resolver_context=resolver_context) | Determines if a file contains a supported archive types.
Args:
path_spec (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built-in context which is not multi process safe.
Returns:
list[str]: supported format type indicators. | Below is the the instruction that describes the task:
### Input:
Determines if a file contains a supported archive types.
Args:
path_spec (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built-in context which is not multi process safe.
Returns:
list[str]: supported format type indicators.
### Response:
def GetArchiveTypeIndicators(cls, path_spec, resolver_context=None):
    """Determines if a file contains a supported archive types.

    Args:
      path_spec (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built-in context which is not multi process safe.

    Returns:
      list[str]: supported format type indicators.
    """
    # The specification store and remainder list are built once and cached
    # on the class.
    if cls._archive_remainder_list is None or cls._archive_store is None:
        store, remainder = cls._GetSpecificationStore(
            definitions.FORMAT_CATEGORY_ARCHIVE)
        cls._archive_remainder_list = remainder
        cls._archive_store = store

    # The signature scanner is likewise created lazily.
    if cls._archive_scanner is None:
        cls._archive_scanner = cls._GetSignatureScanner(cls._archive_store)

    return cls._GetTypeIndicators(
        cls._archive_scanner, cls._archive_store, cls._archive_remainder_list,
        path_spec, resolver_context=resolver_context)
def rindex(values, value):
""" :return: the highest index in values where value is found, else raise ValueError """
if isinstance(values, STRING_TYPES):
try:
return values.rindex(value)
except TypeError:
# Python 3 compliance: search for str values in bytearray
return values.rindex(type(values)(value, DEFAULT_ENCODING))
else:
return len(values) - 1 - values[::-1].index(value) | :return: the highest index in values where value is found, else raise ValueError | Below is the the instruction that describes the task:
### Input:
:return: the highest index in values where value is found, else raise ValueError
### Response:
def rindex(values, value):
    """ :return: the highest index in values where value is found, else raise ValueError """
    if not isinstance(values, STRING_TYPES):
        # Generic sequences: search the reversed copy and translate the index.
        return len(values) - 1 - values[::-1].index(value)
    try:
        return values.rindex(value)
    except TypeError:
        # Python 3 compliance: search for str values in bytearray
        return values.rindex(type(values)(value, DEFAULT_ENCODING))
def compress_mean(x, dim, compression_factor):
"""Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor
"""
dims = x.shape.dims
pos = dims.index(dim)
compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
compression_factor_dim = mtf.Dimension(
"compression_factor", compression_factor)
new_shape = (
dims[:pos] + [compressed_dim, compression_factor_dim] + dims[pos + 1:])
x = mtf.reshape(x, new_shape)
x = mtf.reduce_mean(x, reduced_dim=compression_factor_dim)
return x | Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor | Below is the the instruction that describes the task:
### Input:
Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor
### Response:
def compress_mean(x, dim, compression_factor):
    """Compress by taking group means.

    Args:
        x: a Tensor
        dim: a dimension in x.shape
        compression_factor: an integer

    Returns:
        a Tensor
    """
    shape_dims = x.shape.dims
    axis = shape_dims.index(dim)
    # Split `dim` into (dim // factor, factor), then average over the factor.
    grouped_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
    factor_dim = mtf.Dimension("compression_factor", compression_factor)
    split_shape = shape_dims[:axis] + [grouped_dim, factor_dim] + shape_dims[axis + 1:]
    reshaped = mtf.reshape(x, split_shape)
    return mtf.reduce_mean(reshaped, reduced_dim=factor_dim)
def agent_checks(consul_url=None, token=None):
'''
Returns the checks the local agent is managing
:param consul_url: The Consul server URL.
:return: Returns the checks the local agent is managing
CLI Example:
.. code-block:: bash
salt '*' consul.agent_checks
'''
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret
function = 'agent/checks'
ret = _query(consul_url=consul_url,
function=function,
token=token,
method='GET')
return ret | Returns the checks the local agent is managing
:param consul_url: The Consul server URL.
:return: Returns the checks the local agent is managing
CLI Example:
.. code-block:: bash
salt '*' consul.agent_checks | Below is the the instruction that describes the task:
### Input:
Returns the checks the local agent is managing
:param consul_url: The Consul server URL.
:return: Returns the checks the local agent is managing
CLI Example:
.. code-block:: bash
salt '*' consul.agent_checks
### Response:
def agent_checks(consul_url=None, token=None):
    '''
    Returns the checks the local agent is managing

    :param consul_url: The Consul server URL.
    :return: Returns the checks the local agent is managing

    CLI Example:

    .. code-block:: bash

        salt '*' consul.agent_checks

    '''
    if not consul_url:
        consul_url = _get_config()
    if not consul_url:
        # Still no URL after consulting the configuration: report failure.
        log.error('No Consul URL found.')
        return {'message': 'No Consul URL found.', 'res': False}
    return _query(consul_url=consul_url,
                  function='agent/checks',
                  token=token,
                  method='GET')
def _handle_select(self, parts, result_metadata=None):
"""Handle reply messages from SELECT statements"""
self.rowcount = -1
if result_metadata is not None:
# Select was prepared and we can use the already received metadata
self.description, self._column_types = self._handle_result_metadata(result_metadata)
for part in parts:
if part.kind == part_kinds.RESULTSETID:
self._resultset_id = part.value
elif part.kind == part_kinds.RESULTSETMETADATA:
self.description, self._column_types = self._handle_result_metadata(part)
elif part.kind == part_kinds.RESULTSET:
self._buffer = part.unpack_rows(self._column_types, self.connection)
self._received_last_resultset_part = part.attribute & 1
self._executed = True
elif part.kind in (part_kinds.STATEMENTCONTEXT, part_kinds.TRANSACTIONFLAGS, part_kinds.PARAMETERMETADATA):
pass
else:
raise InterfaceError("Prepared select statement response, unexpected part kind %d." % part.kind) | Handle reply messages from SELECT statements | Below is the the instruction that describes the task:
### Input:
Handle reply messages from SELECT statements
### Response:
def _handle_select(self, parts, result_metadata=None):
    """Handle reply messages from SELECT statements

    :param parts: iterable of reply-message parts to process.
    :param result_metadata: already-received metadata part for a prepared
        select; when given, the cursor description is built from it up front.
    """
    # SELECTs do not report an affected-row count.
    self.rowcount = -1
    if result_metadata is not None:
        # Select was prepared and we can use the already received metadata
        self.description, self._column_types = self._handle_result_metadata(result_metadata)

    for part in parts:
        if part.kind == part_kinds.RESULTSETID:
            self._resultset_id = part.value
        elif part.kind == part_kinds.RESULTSETMETADATA:
            self.description, self._column_types = self._handle_result_metadata(part)
        elif part.kind == part_kinds.RESULTSET:
            self._buffer = part.unpack_rows(self._column_types, self.connection)
            # Lowest attribute bit flags the final result-set part.
            self._received_last_resultset_part = part.attribute & 1
            self._executed = True
        elif part.kind in (part_kinds.STATEMENTCONTEXT, part_kinds.TRANSACTIONFLAGS, part_kinds.PARAMETERMETADATA):
            # Informational parts; nothing to extract here.
            pass
        else:
            raise InterfaceError("Prepared select statement response, unexpected part kind %d." % part.kind)
def remove_this_clink(self,clink_id):
"""
Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed
"""
for clink in self.get_clinks():
if clink.get_id() == clink_id:
self.node.remove(clink.get_node())
break | Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed | Below is the the instruction that describes the task:
### Input:
Removes the clink for the given clink identifier
@type clink_id: string
@param clink_id: the clink identifier to be removed
### Response:
def remove_this_clink(self, clink_id):
    """
    Removes the clink for the given clink identifier

    @type clink_id: string
    @param clink_id: the clink identifier to be removed
    """
    # Only the first clink with a matching identifier is removed.
    matches = (clink for clink in self.get_clinks() if clink.get_id() == clink_id)
    target = next(matches, None)
    if target is not None:
        self.node.remove(target.get_node())
def pre_save(self, instance, add: bool):
"""Ran just before the model is saved, allows us to built
the slug.
Arguments:
instance:
The model that is being saved.
add:
Indicates whether this is a new entry
to the database or an update.
"""
localized_value = getattr(instance, self.attname)
if not localized_value:
return None
for lang_code, _ in settings.LANGUAGES:
value = localized_value.get(lang_code)
if not value:
continue
localized_value.set(
lang_code,
bleach.clean(value, **get_bleach_default_options())
)
return localized_value | Ran just before the model is saved, allows us to built
the slug.
Arguments:
instance:
The model that is being saved.
add:
Indicates whether this is a new entry
to the database or an update. | Below is the the instruction that describes the task:
### Input:
Ran just before the model is saved, allows us to built
the slug.
Arguments:
instance:
The model that is being saved.
add:
Indicates whether this is a new entry
to the database or an update.
### Response:
def pre_save(self, instance, add: bool):
    """Ran just before the model is saved; bleaches every localized value.

    Arguments:
        instance:
            The model that is being saved.

        add:
            Indicates whether this is a new entry
            to the database or an update.
    """
    localized_value = getattr(instance, self.attname)
    if not localized_value:
        return None

    for lang_code, _unused in settings.LANGUAGES:
        raw = localized_value.get(lang_code)
        if raw:
            cleaned = bleach.clean(raw, **get_bleach_default_options())
            localized_value.set(lang_code, cleaned)

    return localized_value
def move(self, group, cluster_ids=None):
"""Assign a group to some clusters.
Example: `good`
"""
if isinstance(cluster_ids, string_types):
logger.warn("The list of clusters should be a list of integers, "
"not a string.")
return
self.label('group', group, cluster_ids=cluster_ids) | Assign a group to some clusters.
Example: `good` | Below is the the instruction that describes the task:
### Input:
Assign a group to some clusters.
Example: `good`
### Response:
def move(self, group, cluster_ids=None):
    """Assign a group to some clusters.

    Example: `good`

    """
    # Guard against a common mistake: passing a string instead of a list.
    if isinstance(cluster_ids, string_types):
        logger.warn("The list of clusters should be a list of integers, "
                    "not a string.")
        return
    self.label('group', group, cluster_ids=cluster_ids)
def sanitize(self):
'''
Check and optionally fix properties
'''
# Let the parent do its stuff
super(Protocol, self).sanitize()
# Check if the next header is of the right type, and fix this header
# if we know better (i.e. the payload is a ProtocolElement so we know
# the header_type)
if isinstance(self.payload, Protocol):
self.next_header = self.payload.header_type | Check and optionally fix properties | Below is the the instruction that describes the task:
### Input:
Check and optionally fix properties
### Response:
def sanitize(self):
'''
Check and optionally fix properties
'''
# Let the parent do its stuff
super(Protocol, self).sanitize()
# Check if the next header is of the right type, and fix this header
# if we know better (i.e. the payload is a ProtocolElement so we know
# the header_type)
if isinstance(self.payload, Protocol):
self.next_header = self.payload.header_type |
def initialize(*args, **kwargs):
    """Functional wrapper around the ``Initialize`` class.

    Kept for backward compatibility: the class itself is the recommended,
    more flexible entry point and there is no current plan to remove this
    function, though it may be deprecated later.  The first positional
    argument is the app; an optional second positional argument is passed
    through as the ``authenticate`` handler.
    """
    app = args[0]
    if len(args) > 1:
        kwargs.update(authenticate=args[1])
    return Initialize(app, **kwargs)
method, but was replaced by the Initialize class. It is recommended to use
the class because it is more flexible. There is no current plan to remove
this method, but it may be deprecated in the future. | Below is the instruction that describes the task:
### Input:
Functional approach to initializing Sanic JWT. This was the original
method, but was replaced by the Initialize class. It is recommended to use
the class because it is more flexible. There is no current plan to remove
this method, but it may be depracated in the future.
### Response:
def initialize(*args, **kwargs):
"""
Functional approach to initializing Sanic JWT. This was the original
method, but was replaced by the Initialize class. It is recommended to use
the class because it is more flexible. There is no current plan to remove
this method, but it may be depracated in the future.
"""
if len(args) > 1:
kwargs.update({"authenticate": args[1]})
return Initialize(args[0], **kwargs) |
def complete_offset_upload(self, chunk_num):
    # type: (Descriptor, int) -> None
    """Complete the upload for the offset
    :param Descriptor self: this
    :param int chunk_num: chunk num completed
    """
    with self._meta_lock:
        self._outstanding_ops -= 1
        # save resume state
        if self.is_resumable:
            # only set resumable completed if all replicas for this
            # chunk are complete
            if blobxfer.util.is_not_empty(self._dst_ase.replica_targets):
                if chunk_num not in self._replica_counters:
                    # start counter at -1 since we need 1 "extra" for the
                    # primary in addition to the replica targets
                    self._replica_counters[chunk_num] = -1
                self._replica_counters[chunk_num] += 1
                if (self._replica_counters[chunk_num] !=
                        len(self._dst_ase.replica_targets)):
                    # some replicas still outstanding: do not record
                    # this chunk as resumable yet
                    return
                else:
                    # all replicas accounted for; drop the counter
                    self._replica_counters.pop(chunk_num)
            self._completed_chunks.set(True, chunk_num)
            completed = self._outstanding_ops == 0
            self._resume_mgr.add_or_update_record(
                self._dst_ase, self._src_block_list, self._offset,
                self._chunk_size, self._total_chunks,
                self._completed_chunks.int, completed,
            )
:param Descriptor self: this
:param int chunk_num: chunk num completed | Below is the instruction that describes the task:
### Input:
Complete the upload for the offset
:param Descriptor self: this
:param int chunk_num: chunk num completed
### Response:
def complete_offset_upload(self, chunk_num):
# type: (Descriptor, int) -> None
"""Complete the upload for the offset
:param Descriptor self: this
:param int chunk_num: chunk num completed
"""
with self._meta_lock:
self._outstanding_ops -= 1
# save resume state
if self.is_resumable:
# only set resumable completed if all replicas for this
# chunk are complete
if blobxfer.util.is_not_empty(self._dst_ase.replica_targets):
if chunk_num not in self._replica_counters:
# start counter at -1 since we need 1 "extra" for the
# primary in addition to the replica targets
self._replica_counters[chunk_num] = -1
self._replica_counters[chunk_num] += 1
if (self._replica_counters[chunk_num] !=
len(self._dst_ase.replica_targets)):
return
else:
self._replica_counters.pop(chunk_num)
self._completed_chunks.set(True, chunk_num)
completed = self._outstanding_ops == 0
self._resume_mgr.add_or_update_record(
self._dst_ase, self._src_block_list, self._offset,
self._chunk_size, self._total_chunks,
self._completed_chunks.int, completed,
) |
def partitions(l, partition_size):
    """Yield successive ``partition_size``-sized chunks of ``l``.

    >>> list(partitions([], 10))
    []
    >>> list(partitions([1,2,3,4,5], 1))
    [[1], [2], [3], [4], [5]]
    >>> list(partitions([1,2,3,4,5], 2))
    [[1, 2], [3, 4], [5]]
    >>> list(partitions([1,2,3,4,5], 5))
    [[1, 2, 3, 4, 5]]
    :param list l: List to be partitioned
    :param int partition_size: Size of partitions
    """
    # range() instead of xrange(): xrange is a NameError on Python 3,
    # while range() iterates identically on both Python 2 and 3 here.
    for i in range(0, len(l), partition_size):
        yield l[i:i + partition_size]
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions | Below is the instruction that describes the task:
### Input:
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
### Response:
def partitions(l, partition_size):
"""
>>> list(partitions([], 10))
[]
>>> list(partitions([1,2,3,4,5], 1))
[[1], [2], [3], [4], [5]]
>>> list(partitions([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
>>> list(partitions([1,2,3,4,5], 5))
[[1, 2, 3, 4, 5]]
:param list l: List to be partitioned
:param int partition_size: Size of partitions
"""
for i in xrange(0, len(l), partition_size):
yield l[i:i + partition_size] |
def to_dict_list(df, use_ordered_dict=True):
    """Transform each row to dict, and put them into a list.

    The resulting list has one entry per row of the ``pandas.DataFrame``;
    each dict maps column name to that row's value (i.e. one row behaves
    like a ``pandas.Series``).

    :param df: the DataFrame to convert.
    :param bool use_ordered_dict: when True (default) rows become
        ``OrderedDict`` instances preserving column order, otherwise
        plain dicts.
    """
    # Pick the row factory up front instead of rebinding the builtin
    # ``dict`` name (the original shadowed the builtin).
    row_factory = OrderedDict if use_ordered_dict else dict
    columns = df.columns
    return [row_factory(zip(columns, tp)) for tp in itertuple(df)]
**中文文档**
将 ``pandas.DataFrame`` 转换成一个字典的列表。列表的长度与行数相同, 其中
每一个字典相当于表中的一行, 相当于一个 ``pandas.Series`` 对象。 | Below is the instruction that describes the task:
### Input:
Transform each row to dict, and put them into a list.
**中文文档**
将 ``pandas.DataFrame`` 转换成一个字典的列表。列表的长度与行数相同, 其中
每一个字典相当于表中的一行, 相当于一个 ``pandas.Series`` 对象。
### Response:
def to_dict_list(df, use_ordered_dict=True):
"""Transform each row to dict, and put them into a list.
**中文文档**
将 ``pandas.DataFrame`` 转换成一个字典的列表。列表的长度与行数相同, 其中
每一个字典相当于表中的一行, 相当于一个 ``pandas.Series`` 对象。
"""
if use_ordered_dict:
dict = OrderedDict
columns = df.columns
data = list()
for tp in itertuple(df):
data.append(dict(zip(columns, tp)))
return data |
def post_message(message,
                 channel=None,
                 username=None,
                 api_url=None,
                 hook=None):
    '''
    Send a message to a Mattermost channel.
    :param channel: The channel name, either will work.
    :param username: The username of the poster.
    :param message: The message to send to the Mattermost channel.
    :param api_url: The Mattermost api url, if not specified in the configuration.
    :param hook: The Mattermost hook, if not specified in the configuration.
    :return: Boolean if message was sent successfully.
    CLI Example:
    .. code-block:: bash
        salt-run mattermost.post_message message='Build is done'
    '''
    if not api_url:
        api_url = _get_api_url()
    if not hook:
        hook = _get_hook()
    if not username:
        username = _get_username()
    if not channel:
        channel = _get_channel()
    if not message:
        log.error('message is a required option.')
        # Bail out here: previously execution fell through and crashed
        # below on '```' + None when no message was supplied.
        return False
    parameters = dict()
    if channel:
        parameters['channel'] = channel
    if username:
        parameters['username'] = username
    parameters['text'] = '```' + message + '```'  # pre-formatted, fixed-width text
    log.debug('Parameters: %s', parameters)
    data = salt.utils.json.dumps(parameters)
    result = salt.utils.mattermost.query(
        api_url=api_url,
        hook=hook,
        data=str('payload={0}').format(data))  # future lint: blacklisted-function
    if result:
        return True
    else:
        return result
:param channel: The channel name, either will work.
:param username: The username of the poster.
:param message: The message to send to the Mattermost channel.
:param api_url: The Mattermost api url, if not specified in the configuration.
:param hook: The Mattermost hook, if not specified in the configuration.
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt-run mattermost.post_message message='Build is done' | Below is the instruction that describes the task:
### Input:
Send a message to a Mattermost channel.
:param channel: The channel name, either will work.
:param username: The username of the poster.
:param message: The message to send to the Mattermost channel.
:param api_url: The Mattermost api url, if not specified in the configuration.
:param hook: The Mattermost hook, if not specified in the configuration.
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt-run mattermost.post_message message='Build is done'
### Response:
def post_message(message,
channel=None,
username=None,
api_url=None,
hook=None):
'''
Send a message to a Mattermost channel.
:param channel: The channel name, either will work.
:param username: The username of the poster.
:param message: The message to send to the Mattermost channel.
:param api_url: The Mattermost api url, if not specified in the configuration.
:param hook: The Mattermost hook, if not specified in the configuration.
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt-run mattermost.post_message message='Build is done'
'''
if not api_url:
api_url = _get_api_url()
if not hook:
hook = _get_hook()
if not username:
username = _get_username()
if not channel:
channel = _get_channel()
if not message:
log.error('message is a required option.')
parameters = dict()
if channel:
parameters['channel'] = channel
if username:
parameters['username'] = username
parameters['text'] = '```' + message + '```' # pre-formatted, fixed-width text
log.debug('Parameters: %s', parameters)
data = salt.utils.json.dumps(parameters)
result = salt.utils.mattermost.query(
api_url=api_url,
hook=hook,
data=str('payload={0}').format(data)) # future lint: blacklisted-function
if result:
return True
else:
return result |
def get_timestamp_expression(self, time_grain):
    """Getting the time component of the query.

    Builds a SQLAlchemy column expression for this time column, applying
    the configured python_date_format (epoch handling) and the requested
    time grain via the database engine spec.

    :param time_grain: optional grain key looked up in ``db.grains_dict()``
    :raises NotImplementedError: if the grain is unknown for this database
    """
    label = utils.DTTM_ALIAS
    db = self.table.database
    pdf = self.python_date_format
    is_epoch = pdf in ('epoch_s', 'epoch_ms')
    # Fast path: plain datetime column, no custom expression, no grain,
    # and no epoch conversion needed.
    if not self.expression and not time_grain and not is_epoch:
        sqla_col = column(self.column_name, type_=DateTime)
        return self.table.make_sqla_column_compatible(sqla_col, label)
    grain = None
    if time_grain:
        grain = db.grains_dict().get(time_grain)
        if not grain:
            raise NotImplementedError(
                f'No grain spec for {time_grain} for database {db.database_name}')
    col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name)
    expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain)
    sqla_col = literal_column(expr, type_=DateTime)
    return self.table.make_sqla_column_compatible(sqla_col, label)
### Input:
Getting the time component of the query
### Response:
def get_timestamp_expression(self, time_grain):
"""Getting the time component of the query"""
label = utils.DTTM_ALIAS
db = self.table.database
pdf = self.python_date_format
is_epoch = pdf in ('epoch_s', 'epoch_ms')
if not self.expression and not time_grain and not is_epoch:
sqla_col = column(self.column_name, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
grain = None
if time_grain:
grain = db.grains_dict().get(time_grain)
if not grain:
raise NotImplementedError(
f'No grain spec for {time_grain} for database {db.database_name}')
col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name)
expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain)
sqla_col = literal_column(expr, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label) |
def collapse_pane(self, side):
    """
    Toggle collapsing the left or right panes.

    :param side: which pane to toggle, ``'left'`` or ``'right'``.
    """
    # TODO: this is too tied to one configuration, need to figure
    # out how to generalize this
    hsplit = self.w['hpnl']
    sizes = hsplit.get_sizes()
    lsize, msize, rsize = sizes
    # Remember the initial sizes once, so a collapsed pane can later be
    # restored to its original width.
    if self._lsize is None:
        self._lsize, self._rsize = lsize, rsize
    self.logger.debug("left=%d mid=%d right=%d" % (
        lsize, msize, rsize))
    # A size below 10 is treated as "collapsed"; the middle pane absorbs
    # or gives back the difference so the total stays constant.
    if side == 'right':
        if rsize < 10:
            # restore pane
            rsize = self._rsize
            msize -= rsize
        else:
            # minimize pane
            self._rsize = rsize
            msize += rsize
            rsize = 0
    elif side == 'left':
        if lsize < 10:
            # restore pane
            lsize = self._lsize
            msize -= lsize
        else:
            # minimize pane
            self._lsize = lsize
            msize += lsize
            lsize = 0
    hsplit.set_sizes([lsize, msize, rsize])
### Input:
Toggle collapsing the left or right panes.
### Response:
def collapse_pane(self, side):
"""
Toggle collapsing the left or right panes.
"""
# TODO: this is too tied to one configuration, need to figure
# out how to generalize this
hsplit = self.w['hpnl']
sizes = hsplit.get_sizes()
lsize, msize, rsize = sizes
if self._lsize is None:
self._lsize, self._rsize = lsize, rsize
self.logger.debug("left=%d mid=%d right=%d" % (
lsize, msize, rsize))
if side == 'right':
if rsize < 10:
# restore pane
rsize = self._rsize
msize -= rsize
else:
# minimize pane
self._rsize = rsize
msize += rsize
rsize = 0
elif side == 'left':
if lsize < 10:
# restore pane
lsize = self._lsize
msize -= lsize
else:
# minimize pane
self._lsize = lsize
msize += lsize
lsize = 0
hsplit.set_sizes([lsize, msize, rsize]) |
def conditional(mean, covar, dims_in, dims_out, covariance_type='full'):
    """Build the conditional Gaussian p(dims_out | dims_in = x).

    Returns a function that, given x, returns ``[conditional_mean,
    conditional_covariance]``.  The conditional covariance does not depend
    on x and is precomputed once.  ``covariance_type`` is accepted for API
    compatibility; only the full-covariance computation is performed here.
    """
    # Partition the joint covariance into its four blocks.
    sigma_ii = covar[ix_(dims_in, dims_in)]
    sigma_io = covar[ix_(dims_in, dims_out)]
    sigma_oi = covar[ix_(dims_out, dims_in)]
    sigma_oo = covar[ix_(dims_out, dims_out)]
    # Gain matrix Sigma_oi * Sigma_ii^-1 (standard Gaussian conditioning).
    gain = sigma_oi.dot(inv(sigma_ii))
    # Schur complement: the conditional covariance, independent of x.
    cond_cov = sigma_oo - gain.dot(sigma_io)

    def cond_mean(x):
        return mean[dims_out] + gain.dot(x - mean[dims_in])

    return lambda x: [cond_mean(x), cond_cov]
### Input:
Return a function f such that f(x) = p(dims_out | dims_in = x) (f actually returns the mean and covariance of the conditional distribution
### Response:
def conditional(mean, covar, dims_in, dims_out, covariance_type='full'):
""" Return a function f such that f(x) = p(dims_out | dims_in = x) (f actually returns the mean and covariance of the conditional distribution
"""
in_in = covar[ix_(dims_in, dims_in)]
in_out = covar[ix_(dims_in, dims_out)]
out_in = covar[ix_(dims_out, dims_in)]
out_out = covar[ix_(dims_out, dims_out)]
in_in_inv = inv(in_in)
out_in_dot_in_in_inv = out_in.dot(in_in_inv)
cond_covar = out_out - out_in_dot_in_in_inv.dot(in_out)
cond_mean = lambda x: mean[dims_out] + out_in_dot_in_in_inv.dot(x - mean[dims_in])
return lambda x: [cond_mean(x), cond_covar] |
def run_script(self, requires, script_name):
    """Locate distribution for `requires` and run `script_name` script."""
    # Grab the *caller's* global namespace: sys._getframe(1) is the frame
    # of whoever called run_script, so the script executes as if it were
    # that module.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    # Wipe the namespace but keep __name__, giving the script a clean
    # module environment under the caller's name.
    ns.clear()
    ns['__name__'] = name
    self.require(requires)[0].run_script(script_name, ns)
### Input:
Locate distribution for `requires` and run `script_name` script
### Response:
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns) |
def splitext(self):
    """Split off the filename extension.

    Returns ``(p.stripext(), p.ext)``: the path without its extension
    (wrapped in this path class) and the extension itself.  Either part
    may be empty.  The extension runs from the last ``'.'`` to the end of
    the final path segment, so ``a + b == p`` whenever
    ``(a, b) == p.splitext()``.

    .. seealso:: :func:`os.path.splitext`
    """
    stem, extension = self.module.splitext(self)
    return self._next_class(stem), extension
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from ``'.'`` to the end of the
last path segment. This has the property that if
``(a, b) == p.splitext()``, then ``a + b == p``.
.. seealso:: :func:`os.path.splitext` | Below is the instruction that describes the task:
### Input:
p.splitext() -> Return ``(p.stripext(), p.ext)``.
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from ``'.'`` to the end of the
last path segment. This has the property that if
``(a, b) == p.splitext()``, then ``a + b == p``.
.. seealso:: :func:`os.path.splitext`
### Response:
def splitext(self):
""" p.splitext() -> Return ``(p.stripext(), p.ext)``.
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from ``'.'`` to the end of the
last path segment. This has the property that if
``(a, b) == p.splitext()``, then ``a + b == p``.
.. seealso:: :func:`os.path.splitext`
"""
filename, ext = self.module.splitext(self)
return self._next_class(filename), ext |
def convert_raw_tuple(value_tuple, format_string):
    """
    Convert a tuple of raw values, according to the given line format.

    Each raw value is paired with the format character at the same
    position: ``s`` keeps the string, ``S`` splits it on spaces (dropping
    empty tokens), ``i`` converts to int, ``U``/``A`` go through the
    Unicode/ASCII field converters.  ``None`` values pass through
    unchanged; values with an unrecognised code (e.g. ``x``) are skipped.

    :param tuple value_tuple: the tuple of raw values
    :param str format_string: the format of the tuple
    :rtype: tuple
    """
    handlers = {
        u"s": lambda raw: raw,
        u"S": lambda raw: [tok for tok in raw.split(u" ") if len(tok) > 0],
        u"i": int,
        u"U": lambda raw: convert_unicode_field(raw),
        u"A": lambda raw: convert_ascii_field(raw),
    }
    converted = []
    for raw, code in zip(value_tuple, format_string):
        if raw is None:
            converted.append(None)
        elif code in handlers:
            converted.append(handlers[code](raw))
        # any other code (e.g. u"x") is ignored, matching the original
    return tuple(converted)
:param tuple value_tuple: the tuple of raw values
:param str format_string: the format of the tuple
:rtype: list of tuples | Below is the instruction that describes the task:
### Input:
Convert a tuple of raw values, according to the given line format.
:param tuple value_tuple: the tuple of raw values
:param str format_string: the format of the tuple
:rtype: list of tuples
### Response:
def convert_raw_tuple(value_tuple, format_string):
"""
Convert a tuple of raw values, according to the given line format.
:param tuple value_tuple: the tuple of raw values
:param str format_string: the format of the tuple
:rtype: list of tuples
"""
values = []
for v, c in zip(value_tuple, format_string):
if v is None:
# append None
values.append(v)
elif c == u"s":
# string
values.append(v)
elif c == u"S":
# string, split using space as delimiter
values.append([s for s in v.split(u" ") if len(s) > 0])
elif c == u"i":
# int
values.append(int(v))
elif c == u"U":
# Unicode
values.append(convert_unicode_field(v))
elif c == u"A":
# ASCII
values.append(convert_ascii_field(v))
#elif c == u"x":
# # ignore
# pass
return tuple(values) |
def node_created_handler(sender, **kwargs):
    """Send a notification when a new node is created, according to each
    user's notification settings."""
    # Fire only on creation, not on updates.
    if kwargs['created']:
        obj = kwargs['instance']
        # Do not notify the node's own owner.
        queryset = exclude_owner_of_node(obj)
        # Dispatched asynchronously via the task queue.
        create_notifications.delay(**{
            "users": queryset,
            "notification_model": Notification,
            "notification_type": "node_created",
            "related_object": obj
        })
### Input:
send notification when a new node is created according to users's settings
### Response:
def node_created_handler(sender, **kwargs):
""" send notification when a new node is created according to users's settings """
if kwargs['created']:
obj = kwargs['instance']
queryset = exclude_owner_of_node(obj)
create_notifications.delay(**{
"users": queryset,
"notification_model": Notification,
"notification_type": "node_created",
"related_object": obj
}) |
def tab_name_editor(self):
    """Open the inline name editor for the currently selected tab."""
    current = self.tabwidget.currentIndex()
    self.tabwidget.tabBar().tab_name_editor.edit_tab(current)
### Input:
Trigger the tab name editor.
### Response:
def tab_name_editor(self):
"""Trigger the tab name editor."""
index = self.tabwidget.currentIndex()
self.tabwidget.tabBar().tab_name_editor.edit_tab(index) |
def get_internal_modules(key='exa'):
    """
    Get a list of modules belonging to the given package.

    Only true submodules are returned (names starting with ``key + '.'``);
    the package module itself is excluded.

    Args:
        key (str): Package or library name (e.g. "exa")
    """
    prefix = key + '.'
    return [mod for name, mod in sys.modules.items() if name.startswith(prefix)]
Args:
key (str): Package or library name (e.g. "exa") | Below is the instruction that describes the task:
### Input:
Get a list of modules belonging to the given package.
Args:
key (str): Package or library name (e.g. "exa")
### Response:
def get_internal_modules(key='exa'):
"""
Get a list of modules belonging to the given package.
Args:
key (str): Package or library name (e.g. "exa")
"""
key += '.'
return [v for k, v in sys.modules.items() if k.startswith(key)] |
def on_drag_data_get(self, widget, context, data, info, time):
    """Dragged state is inserted and its state_id sent to the receiver.

    :param widget: widget that originated the drag
    :param context: drag context
    :param data: SelectionData: contains state_id
    :param info: info integer registered for the drag target
    :param time: timestamp of the drag event
    """
    library_state = self._get_selected_library_state()
    # NOTE(review): local import — presumably to avoid a circular import
    # at module load time; confirm before moving to the top of the file.
    import rafcon.gui.helpers.state_machine as gui_helper_state_machine
    gui_helper_state_machine.add_state_by_drag_and_drop(library_state, data)
:param widget:
:param context:
:param data: SelectionData: contains state_id
:param info:
:param time: | Below is the instruction that describes the task:
### Input:
dragged state is inserted and its state_id sent to the receiver
:param widget:
:param context:
:param data: SelectionData: contains state_id
:param info:
:param time:
### Response:
def on_drag_data_get(self, widget, context, data, info, time):
"""dragged state is inserted and its state_id sent to the receiver
:param widget:
:param context:
:param data: SelectionData: contains state_id
:param info:
:param time:
"""
library_state = self._get_selected_library_state()
import rafcon.gui.helpers.state_machine as gui_helper_state_machine
gui_helper_state_machine.add_state_by_drag_and_drop(library_state, data) |
def ec2_elasticip_elasticip_ipaddress(self, lookup, default=None):
    """
    Args:
        lookup: the CloudFormation resource name of the Elastic IP address to look up
        default: the optional value to return if lookup failed; returns None if not set
    Returns:
        The IP address of the first Elastic IP found with a description matching 'lookup' or default/None if no match
    """
    # Extract environment from resource ID to build stack name.
    # Raw string literal: '\w' in a plain string is an invalid escape
    # sequence (DeprecationWarning on Python 3.6+, an error later).
    m = re.search(r'ElasticIp([A-Z]?[a-z]+[0-9]?)\w+', lookup)
    # The lookup string was not a valid ElasticIp resource label
    if m is None:
        return default
    env = m.group(1)
    stackname = "{}-elasticip".format(env.lower())
    # Convert env substring to title in case {{ENV}} substitution is being used
    lookup = lookup.replace(env, env.title())
    # Look up the EIP resource in the stack to get the IP address assigned to the EIP
    try:
        eip_stack = EFAwsResolver.__CLIENTS["cloudformation"].describe_stack_resources(
            StackName=stackname,
            LogicalResourceId=lookup
        )
    except ClientError:
        return default
    stack_resources = eip_stack["StackResources"]
    # Resource does not exist in stack
    if len(stack_resources) < 1:
        return default
    eip_publicip = stack_resources[0]["PhysicalResourceId"]
    return eip_publicip
lookup: the CloudFormation resource name of the Elastic IP address to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
The IP address of the first Elastic IP found with a description matching 'lookup' or default/None if no match | Below is the instruction that describes the task:
### Input:
Args:
lookup: the CloudFormation resource name of the Elastic IP address to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
The IP address of the first Elastic IP found with a description matching 'lookup' or default/None if no match
### Response:
def ec2_elasticip_elasticip_ipaddress(self, lookup, default=None):
"""
Args:
lookup: the CloudFormation resource name of the Elastic IP address to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
The IP address of the first Elastic IP found with a description matching 'lookup' or default/None if no match
"""
# Extract environment from resource ID to build stack name
m = re.search('ElasticIp([A-Z]?[a-z]+[0-9]?)\w+', lookup)
# The lookup string was not a valid ElasticIp resource label
if m is None:
return default
env = m.group(1)
stackname = "{}-elasticip".format(env.lower())
# Convert env substring to title in case {{ENV}} substitution is being used
lookup = lookup.replace(env, env.title())
# Look up the EIP resource in the stack to get the IP address assigned to the EIP
try:
eip_stack = EFAwsResolver.__CLIENTS["cloudformation"].describe_stack_resources(
StackName=stackname,
LogicalResourceId=lookup
)
except ClientError:
return default
stack_resources = eip_stack["StackResources"]
# Resource does not exist in stack
if len(stack_resources) < 1:
return default
eip_publicip = stack_resources[0]["PhysicalResourceId"]
return eip_publicip |
def authenticate(url, account, key, by='name', expires=0, timestamp=None,
                 timeout=None, request_type="xml", admin_auth=False,
                 use_password=False, raise_on_error=False):
    """ Authenticate to the Zimbra server
    :param url: URL of Zimbra SOAP service
    :param account: The account to be authenticated against
    :param key: The preauth key of the domain of the account or a password (if
      admin_auth or use_password is True)
    :param by: If the account is specified as a name, an ID or a
      ForeignPrincipal
    :param expires: When the token expires (or 0 for default expiration)
    :param timestamp: When the token was requested (None for "now")
    :param timeout: Timeout for the communication with the server. Defaults
      to the urllib2-default
    :param request_type: Which type of request to use ("xml" (default) or
      "json")
    :param admin_auth: This request should authenticate and generate an admin
      token. The "key"-parameter therefore holds the admin password (implies
      use_password)
    :param use_password: The "key"-parameter holds a password. Do a password-
      based user authentication.
    :param raise_on_error: Should I raise an exception when an authentication
      error occurs or just return None?
    :return: The authentication token or None
    :rtype: str or None or unicode
    """
    # Zimbra preauth timestamps are in milliseconds.
    if timestamp is None:
        timestamp = int(time.time()) * 1000
    pak = ""
    if not admin_auth:
        # Preauth-key flow: compute the preauth token for this account.
        pak = preauth.create_preauth(account, key, by, expires, timestamp)
    if request_type == 'xml':
        auth_request = RequestXml()
    else:
        auth_request = RequestJson()
    request_data = {
        'account': {
            'by': by,
            '_content': account
        }
    }
    ns = "urn:zimbraAccount"
    if admin_auth:
        # Admin authentication: admin namespace, password sent directly.
        ns = "urn:zimbraAdmin"
        request_data['password'] = key
    elif use_password:
        # Password-based user authentication.
        request_data['password'] = {
            "_content": key
        }
    else:
        # Default: preauth-token authentication.
        request_data['preauth'] = {
            'timestamp': timestamp,
            'expires': expires,
            '_content': pak
        }
    auth_request.add_request(
        'AuthRequest',
        request_data,
        ns
    )
    server = Communication(url, timeout)
    if request_type == 'xml':
        response = ResponseXml()
    else:
        response = ResponseJson()
    server.send_request(auth_request, response)
    if response.is_fault():
        # Either surface the failure or signal it with None, depending on
        # raise_on_error.
        if raise_on_error:
            raise AuthenticationFailed(
                "Cannot authenticate user: (%s) %s" % (
                    response.get_fault_code(),
                    response.get_fault_message()
                )
            )
        return None
    return response.get_response()['AuthResponse']['authToken']
:param url: URL of Zimbra SOAP service
:param account: The account to be authenticated against
:param key: The preauth key of the domain of the account or a password (if
admin_auth or use_password is True)
:param by: If the account is specified as a name, an ID or a
ForeignPrincipal
:param expires: When the token expires (or 0 for default expiration)
:param timestamp: When the token was requested (None for "now")
:param timeout: Timeout for the communication with the server. Defaults
to the urllib2-default
:param request_type: Which type of request to use ("xml" (default) or
"json")
:param admin_auth: This request should authenticate and generate an admin
token. The "key"-parameter therefore holds the admin password (implies
use_password)
:param use_password: The "key"-parameter holds a password. Do a password-
based user authentication.
:param raise_on_error: Should I raise an exception when an authentication
error occurs or just return None?
:return: The authentication token or None
:rtype: str or None or unicode | Below is the instruction that describes the task:
### Input:
Authenticate to the Zimbra server
:param url: URL of Zimbra SOAP service
:param account: The account to be authenticated against
:param key: The preauth key of the domain of the account or a password (if
admin_auth or use_password is True)
:param by: If the account is specified as a name, an ID or a
ForeignPrincipal
:param expires: When the token expires (or 0 for default expiration)
:param timestamp: When the token was requested (None for "now")
:param timeout: Timeout for the communication with the server. Defaults
to the urllib2-default
:param request_type: Which type of request to use ("xml" (default) or
"json")
:param admin_auth: This request should authenticate and generate an admin
token. The "key"-parameter therefore holds the admin password (implies
use_password)
:param use_password: The "key"-parameter holds a password. Do a password-
based user authentication.
:param raise_on_error: Should I raise an exception when an authentication
error occurs or just return None?
:return: The authentication token or None
:rtype: str or None or unicode
### Response:
def authenticate(url, account, key, by='name', expires=0, timestamp=None,
                 timeout=None, request_type="xml", admin_auth=False,
                 use_password=False, raise_on_error=False):
    """ Authenticate to the Zimbra server
    :param url: URL of Zimbra SOAP service
    :param account: The account to be authenticated against
    :param key: The preauth key of the domain of the account or a password (if
    admin_auth or use_password is True)
    :param by: If the account is specified as a name, an ID or a
    ForeignPrincipal
    :param expires: When the token expires (or 0 for default expiration)
    :param timestamp: When the token was requested (None for "now")
    :param timeout: Timeout for the communication with the server. Defaults
    to the urllib2-default
    :param request_type: Which type of request to use ("xml" (default) or
    "json")
    :param admin_auth: This request should authenticate and generate an admin
    token. The "key"-parameter therefore holds the admin password (implies
    use_password)
    :param use_password: The "key"-parameter holds a password. Do a password-
    based user authentication.
    :param raise_on_error: Should I raise an exception when an authentication
    error occurs or just return None?
    :return: The authentication token or None
    :rtype: str or None or unicode
    """
    if timestamp is None:
        # Timestamp is sent in milliseconds (seconds * 1000)
        timestamp = int(time.time()) * 1000
    pak = ""
    if not admin_auth:
        # Preauth flow: derive the preauth token from the domain key.
        # Admin auth sends the password directly and skips this.
        pak = preauth.create_preauth(account, key, by, expires, timestamp)
    # Pick the request/response wire format (XML is the default)
    if request_type == 'xml':
        auth_request = RequestXml()
    else:
        auth_request = RequestJson()
    request_data = {
        'account': {
            'by': by,
            '_content': account
        }
    }
    ns = "urn:zimbraAccount"
    if admin_auth:
        # Admin authentication: admin namespace, raw password in "key"
        ns = "urn:zimbraAdmin"
        request_data['password'] = key
    elif use_password:
        # Password-based user authentication
        request_data['password'] = {
            "_content": key
        }
    else:
        # Preauth-based user authentication
        request_data['preauth'] = {
            'timestamp': timestamp,
            'expires': expires,
            '_content': pak
        }
    auth_request.add_request(
        'AuthRequest',
        request_data,
        ns
    )
    server = Communication(url, timeout)
    if request_type == 'xml':
        response = ResponseXml()
    else:
        response = ResponseJson()
    server.send_request(auth_request, response)
    if response.is_fault():
        # Authentication failed: either raise or signal with None
        if raise_on_error:
            raise AuthenticationFailed(
                "Cannot authenticate user: (%s) %s" % (
                    response.get_fault_code(),
                    response.get_fault_message()
                )
            )
        return None
    return response.get_response()['AuthResponse']['authToken']
def get(self):
'''taobao.time.get 获取前台展示的店铺类目
获取淘宝系统当前时间'''
request = TOPRequest('taobao.time.get')
self.create(self.execute(request))
return self.time | taobao.time.get 获取前台展示的店铺类目
    获取淘宝系统当前时间 | Below is the instruction that describes the task:
### Input:
taobao.time.get 获取前台展示的店铺类目
获取淘宝系统当前时间
### Response:
def get(self):
    '''taobao.time.get
    Fetch the current Taobao system time and populate this object from
    the API response.'''
    self.create(self.execute(TOPRequest('taobao.time.get')))
    return self.time
def day(t, now=None, format='%B %d'):
'''
Date delta compared to ``t``. You can override ``now`` to specify what date
to compare to.
You can override the date format by supplying a ``format`` parameter.
:param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime`
object
:param now: default ``None``, optionally a :class:`datetime.datetime`
object
:param format: default ``'%B %d'``
>>> import time
>>> print(day(time.time()))
today
>>> print(day(time.time() - 86400))
yesterday
>>> print(day(time.time() - 604800))
last week
>>> print(day(time.time() + 86400))
tomorrow
>>> print(day(time.time() + 604800))
next week
'''
t1 = _to_date(t)
t2 = _to_date(now or datetime.datetime.now())
diff = t1 - t2
secs = _total_seconds(diff)
days = abs(diff.days)
if days == 0:
return _('today')
elif days == 1:
if secs < 0:
return _('yesterday')
else:
return _('tomorrow')
elif days == 7:
if secs < 0:
return _('last week')
else:
return _('next week')
else:
return t1.strftime(format) | Date delta compared to ``t``. You can override ``now`` to specify what date
to compare to.
You can override the date format by supplying a ``format`` parameter.
:param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime`
object
:param now: default ``None``, optionally a :class:`datetime.datetime`
object
:param format: default ``'%B %d'``
>>> import time
>>> print(day(time.time()))
today
>>> print(day(time.time() - 86400))
yesterday
>>> print(day(time.time() - 604800))
last week
>>> print(day(time.time() + 86400))
tomorrow
>>> print(day(time.time() + 604800))
    next week | Below is the instruction that describes the task:
### Input:
Date delta compared to ``t``. You can override ``now`` to specify what date
to compare to.
You can override the date format by supplying a ``format`` parameter.
:param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime`
object
:param now: default ``None``, optionally a :class:`datetime.datetime`
object
:param format: default ``'%B %d'``
>>> import time
>>> print(day(time.time()))
today
>>> print(day(time.time() - 86400))
yesterday
>>> print(day(time.time() - 604800))
last week
>>> print(day(time.time() + 86400))
tomorrow
>>> print(day(time.time() + 604800))
next week
### Response:
def day(t, now=None, format='%B %d'):
    '''
    Human-friendly name for the date of ``t`` relative to ``now``.
    :param t: timestamp, :class:`datetime.date` or :class:`datetime.datetime`
    object
    :param now: default ``None``, optionally a :class:`datetime.datetime`
    object to compare against instead of the current time
    :param format: default ``'%B %d'``; strftime format used when the date
    is not today/adjacent or exactly one week away
    >>> import time
    >>> print(day(time.time()))
    today
    >>> print(day(time.time() - 86400))
    yesterday
    >>> print(day(time.time() - 604800))
    last week
    >>> print(day(time.time() + 86400))
    tomorrow
    >>> print(day(time.time() + 604800))
    next week
    '''
    target = _to_date(t)
    anchor = _to_date(now or datetime.datetime.now())
    delta = target - anchor
    in_past = _total_seconds(delta) < 0
    distance = abs(delta.days)
    if distance == 0:
        return _('today')
    if distance == 1:
        return _('yesterday') if in_past else _('tomorrow')
    if distance == 7:
        return _('last week') if in_past else _('next week')
    return target.strftime(format)
def name(self):
"""The descriptive device name as advertised by the kernel
and/or the hardware itself.
To get the sysname for this device, use :attr:`sysname`.
Returns:
str: The device name.
"""
pchar = self._libinput.libinput_device_get_name(self._handle)
return string_at(pchar).decode() | The descriptive device name as advertised by the kernel
and/or the hardware itself.
To get the sysname for this device, use :attr:`sysname`.
Returns:
        str: The device name. | Below is the instruction that describes the task:
### Input:
The descriptive device name as advertised by the kernel
and/or the hardware itself.
To get the sysname for this device, use :attr:`sysname`.
Returns:
str: The device name.
### Response:
def name(self):
    """Human-readable device name reported by the kernel and/or hardware.

    For the kernel sysname of this device use :attr:`sysname` instead.
    Returns:
        str: The device name.
    """
    return string_at(
        self._libinput.libinput_device_get_name(self._handle)).decode()
def CreateTasksFilter(pc, tasks):
""" Create property collector filter for tasks """
if not tasks:
return None
# First create the object specification as the task object.
objspecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
# Next, create the property specification as the state.
propspec = vmodl.query.PropertyCollector.PropertySpec(
type=vim.Task, pathSet=[], all=True)
# Create a filter spec with the specified object and property spec.
filterspec = vmodl.query.PropertyCollector.FilterSpec()
filterspec.objectSet = objspecs
filterspec.propSet = [propspec]
# Create the filter
    return pc.CreateFilter(filterspec, True) | Create property collector filter for tasks | Below is the instruction that describes the task:
### Input:
Create property collector filter for tasks
### Response:
def CreateTasksFilter(pc, tasks):
    """ Create property collector filter for tasks """
    if not tasks:
        return None
    # One object spec per task, plus a single property spec that pulls
    # every property of vim.Task, combined into one filter spec.
    task_props = vmodl.query.PropertyCollector.PropertySpec(
        type=vim.Task, pathSet=[], all=True)
    spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[vmodl.query.PropertyCollector.ObjectSpec(obj=task)
                   for task in tasks],
        propSet=[task_props])
    # Register the filter with the property collector (partial updates on)
    return pc.CreateFilter(spec, True)
def to_match(self):
"""Return a unicode object with the MATCH representation of this expression."""
self.validate()
mark_name, field_name = self.location.get_location_name()
validate_safe_string(mark_name)
if field_name is not None:
raise AssertionError(u'Vertex location has non-None field_name: '
u'{} {}'.format(field_name, self.location))
    return mark_name | Return a unicode object with the MATCH representation of this expression. | Below is the instruction that describes the task:
### Input:
Return a unicode object with the MATCH representation of this expression.
### Response:
def to_match(self):
    """Return a unicode object with the MATCH representation of this expression."""
    self.validate()
    mark, field = self.location.get_location_name()
    validate_safe_string(mark)
    # A vertex location must not carry a field component
    if field is not None:
        raise AssertionError(u'Vertex location has non-None field_name: '
                             u'{} {}'.format(field, self.location))
    return mark
def geocode(address):
'''Query function to obtain a latitude and longitude from a location
string such as `Houston, TX` or`Colombia`. This uses an online lookup,
currently wrapping the `geopy` library, and providing an on-disk cache
of queries.
Parameters
----------
address : str
Search string to retrieve the location, [-]
Returns
-------
latitude : float
Latitude of address, [degrees]
longitude : float
Longitude of address, [degrees]
Notes
-----
If a query has been retrieved before, this function will take under 1 ms;
it takes several seconds otherwise.
Examples
--------
>>> geocode('Fredericton, NB')
(45.966425, -66.645813)
'''
loc_tuple = None
try:
cache = geopy_cache()
loc_tuple = cache.cached_address(address)
except:
# Handle bugs in the cache, i.e. if there is no space on disk to create
# the database, by ignoring them
pass
if loc_tuple is not None:
return loc_tuple
else:
geocoder = geopy_geolocator()
if geocoder is None:
return geopy_missing_msg
location = geocoder.geocode(address)
try:
cache.cache_address(address, location.latitude, location.longitude)
except:
pass
return (location.latitude, location.longitude) | Query function to obtain a latitude and longitude from a location
string such as `Houston, TX` or`Colombia`. This uses an online lookup,
currently wrapping the `geopy` library, and providing an on-disk cache
of queries.
Parameters
----------
address : str
Search string to retrieve the location, [-]
Returns
-------
latitude : float
Latitude of address, [degrees]
longitude : float
Longitude of address, [degrees]
Notes
-----
If a query has been retrieved before, this function will take under 1 ms;
it takes several seconds otherwise.
Examples
--------
>>> geocode('Fredericton, NB')
    (45.966425, -66.645813) | Below is the instruction that describes the task:
### Input:
Query function to obtain a latitude and longitude from a location
string such as `Houston, TX` or`Colombia`. This uses an online lookup,
currently wrapping the `geopy` library, and providing an on-disk cache
of queries.
Parameters
----------
address : str
Search string to retrieve the location, [-]
Returns
-------
latitude : float
Latitude of address, [degrees]
longitude : float
Longitude of address, [degrees]
Notes
-----
If a query has been retrieved before, this function will take under 1 ms;
it takes several seconds otherwise.
Examples
--------
>>> geocode('Fredericton, NB')
(45.966425, -66.645813)
### Response:
def geocode(address):
    '''Query function to obtain a latitude and longitude from a location
    string such as `Houston, TX` or `Colombia`. This uses an online lookup,
    currently wrapping the `geopy` library, and providing an on-disk cache
    of queries.
    Parameters
    ----------
    address : str
        Search string to retrieve the location, [-]
    Returns
    -------
    latitude : float
        Latitude of address, [degrees]
    longitude : float
        Longitude of address, [degrees]
    Notes
    -----
    If a query has been retrieved before, this function will take under 1 ms;
    it takes several seconds otherwise.
    Examples
    --------
    >>> geocode('Fredericton, NB')
    (45.966425, -66.645813)
    '''
    # Cache failures (e.g. no disk space to create the database) must never
    # break geocoding, so they are swallowed -- but catch Exception rather
    # than a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    cache = None
    loc_tuple = None
    try:
        cache = geopy_cache()
        loc_tuple = cache.cached_address(address)
    except Exception:
        pass
    if loc_tuple is not None:
        return loc_tuple
    geocoder = geopy_geolocator()
    if geocoder is None:
        # geopy is unavailable; return the canned explanation message
        return geopy_missing_msg
    location = geocoder.geocode(address)
    # Best-effort write-back; `cache` is None when creating it failed above
    # (the original code hit a masked NameError in that case).
    if cache is not None:
        try:
            cache.cache_address(address, location.latitude, location.longitude)
        except Exception:
            pass
    return (location.latitude, location.longitude)
def old_lambdef(self, lambda_loc, args_opt, colon_loc, body):
"""(2.6, 2.7) old_lambdef: 'lambda' [varargslist] ':' old_test"""
if args_opt is None:
args_opt = self._arguments()
args_opt.loc = colon_loc.begin()
return ast.Lambda(args=args_opt, body=body,
lambda_loc=lambda_loc, colon_loc=colon_loc,
                      loc=lambda_loc.join(body.loc)) | (2.6, 2.7) old_lambdef: 'lambda' [varargslist] ':' old_test | Below is the instruction that describes the task:
### Input:
(2.6, 2.7) old_lambdef: 'lambda' [varargslist] ':' old_test
### Response:
def old_lambdef(self, lambda_loc, args_opt, colon_loc, body):
    """(2.6, 2.7) old_lambdef: 'lambda' [varargslist] ':' old_test

    Parser action: build an :class:`ast.Lambda` node for the legacy Python
    2.6/2.7 ``old_lambdef`` grammar production, preserving the source
    locations of the ``lambda`` keyword and the colon.
    """
    if args_opt is None:
        # No parameter list was written; synthesize an empty arguments node
        # and anchor its location at the start of the colon token.
        args_opt = self._arguments()
        args_opt.loc = colon_loc.begin()
    # The resulting node spans from the 'lambda' keyword through the body.
    return ast.Lambda(args=args_opt, body=body,
                      lambda_loc=lambda_loc, colon_loc=colon_loc,
                      loc=lambda_loc.join(body.loc))
def simplex_optimal(self, t):
'''
API:
simplex_optimal(self, t)
Description:
Checks if the current solution is optimal, if yes returns True,
False otherwise.
Pre:
'flow' attributes represents a solution.
Input:
        t: Graph instance that represents spanning tree solution.
Return:
Returns True if the current solution is optimal (optimality
conditions are satisfied), else returns False
'''
for e in self.edge_attr:
if e in t.edge_attr:
continue
flow_ij = self.edge_attr[e]['flow']
potential_i = self.get_node(e[0]).get_attr('potential')
potential_j = self.get_node(e[1]).get_attr('potential')
capacity_ij = self.edge_attr[e]['capacity']
c_ij = self.edge_attr[e]['cost']
cpi_ij = c_ij - potential_i + potential_j
if flow_ij==0:
if cpi_ij < 0:
return False
elif flow_ij==capacity_ij:
if cpi_ij > 0:
return False
return True | API:
simplex_optimal(self, t)
Description:
Checks if the current solution is optimal, if yes returns True,
False otherwise.
Pre:
'flow' attributes represents a solution.
Input:
t: Graph instance tat reperesents spanning tree solution.
Return:
Returns True if the current solution is optimal (optimality
        conditions are satisfied), else returns False | Below is the instruction that describes the task:
### Input:
API:
simplex_optimal(self, t)
Description:
Checks if the current solution is optimal, if yes returns True,
False otherwise.
Pre:
'flow' attributes represents a solution.
Input:
        t: Graph instance that represents spanning tree solution.
Return:
Returns True if the current solution is optimal (optimality
conditions are satisfied), else returns False
### Response:
def simplex_optimal(self, t):
    '''
    API:
        simplex_optimal(self, t)
    Description:
        Check the network-simplex optimality conditions for the current
        solution: every nonbasic arc at its lower bound (flow 0) must have
        a nonnegative reduced cost, and every nonbasic arc at its upper
        bound (flow == capacity) must have a nonpositive reduced cost.
    Pre:
        'flow' attributes represent a solution.
    Input:
        t: Graph instance that represents the spanning tree solution.
    Return:
        True if the current solution is optimal (optimality conditions are
        satisfied), False otherwise.
    '''
    for arc in self.edge_attr:
        # Arcs in the spanning tree are basic; only nonbasic arcs are
        # checked against the reduced-cost conditions.
        if arc in t.edge_attr:
            continue
        attrs = self.edge_attr[arc]
        reduced_cost = (attrs['cost']
                        - self.get_node(arc[0]).get_attr('potential')
                        + self.get_node(arc[1]).get_attr('potential'))
        if attrs['flow'] == 0:
            if reduced_cost < 0:
                return False
        elif attrs['flow'] == attrs['capacity'] and reduced_cost > 0:
            return False
    return True
def get_serializer_class(self, view, method_func):
"""
Try to get the serializer class from view method.
If view method don't have request serializer, fallback to serializer_class on view class
"""
if hasattr(method_func, 'request_serializer'):
return getattr(method_func, 'request_serializer')
if hasattr(view, 'serializer_class'):
return getattr(view, 'serializer_class')
if hasattr(view, 'get_serializer_class'):
return getattr(view, 'get_serializer_class')()
return None | Try to get the serializer class from view method.
    If the view method doesn't have a request serializer, fall back to serializer_class on the view class | Below is the instruction that describes the task:
### Input:
Try to get the serializer class from view method.
If view method don't have request serializer, fallback to serializer_class on view class
### Response:
def get_serializer_class(self, view, method_func):
    """
    Resolve the serializer class for a view method.

    Precedence: a ``request_serializer`` set on the method itself, then the
    view's ``serializer_class`` attribute, then the result of calling the
    view's ``get_serializer_class()``. Returns None when nothing matches.
    """
    for owner, attr in ((method_func, 'request_serializer'),
                        (view, 'serializer_class')):
        if hasattr(owner, attr):
            return getattr(owner, attr)
    if hasattr(view, 'get_serializer_class'):
        return view.get_serializer_class()
    return None
def bdh(
tickers, flds=None, start_date=None, end_date='today', adjust=None, **kwargs
) -> pd.DataFrame:
"""
Bloomberg historical data
Args:
tickers: ticker(s)
flds: field(s)
start_date: start date
end_date: end date - default today
adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
exact match of above words will adjust for corresponding events
Case 0: `-` no adjustment for dividend or split
Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
Case 2: `adjust` will adjust for splits and ignore all dividends
Case 3: `all` == `dvd|split` == adjust for all
Case 4: None == Bloomberg default OR use kwargs
**kwargs: overrides
Returns:
pd.DataFrame
Examples:
>>> res = bdh(
... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],
... start_date='2018-02-05', end_date='2018-02-07',
... ).round(2).transpose()
>>> res.index.name = None
>>> res.columns.name = None
>>> res
2018-02-05 2018-02-06 2018-02-07
VIX Index High 38.80 50.30 31.64
Low 16.80 22.42 21.17
Last_Price 37.32 29.98 27.73
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140605', end_date='20140610', adjust='-'
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-05 647.35
2014-06-06 645.57
2014-06-09 93.70
2014-06-10 94.25
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140606', end_date='20140609',
... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-06 645.57
2014-06-09 93.70
"""
logger = logs.get_logger(bdh, level=kwargs.pop('log', logs.LOG_LEVEL))
# Dividend adjustments
if isinstance(adjust, str) and adjust:
if adjust == 'all':
kwargs['CshAdjNormal'] = True
kwargs['CshAdjAbnormal'] = True
kwargs['CapChg'] = True
else:
kwargs['CshAdjNormal'] = 'normal' in adjust or 'dvd' in adjust
kwargs['CshAdjAbnormal'] = 'abn' in adjust or 'dvd' in adjust
kwargs['CapChg'] = 'split' in adjust
con, _ = create_connection()
elms = assist.proc_elms(**kwargs)
ovrds = assist.proc_ovrds(**kwargs)
if isinstance(tickers, str): tickers = [tickers]
if flds is None: flds = ['Last_Price']
if isinstance(flds, str): flds = [flds]
e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
if start_date is None:
start_date = pd.Timestamp(e_dt) - relativedelta(months=3)
s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')
logger.info(
f'loading historical data from Bloomberg:\n'
f'{assist.info_qry(tickers=tickers, flds=flds)}'
)
logger.debug(
f'\nflds={flds}\nelms={elms}\novrds={ovrds}\nstart_date={s_dt}\nend_date={e_dt}'
)
res = con.bdh(
tickers=tickers, flds=flds, elms=elms, ovrds=ovrds, start_date=s_dt, end_date=e_dt
)
res.index.name = None
if (len(flds) == 1) and kwargs.get('keep_one', False):
return res.xs(flds[0], axis=1, level=1)
return res | Bloomberg historical data
Args:
tickers: ticker(s)
flds: field(s)
start_date: start date
end_date: end date - default today
adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
exact match of above words will adjust for corresponding events
Case 0: `-` no adjustment for dividend or split
Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
Case 2: `adjust` will adjust for splits and ignore all dividends
Case 3: `all` == `dvd|split` == adjust for all
Case 4: None == Bloomberg default OR use kwargs
**kwargs: overrides
Returns:
pd.DataFrame
Examples:
>>> res = bdh(
... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],
... start_date='2018-02-05', end_date='2018-02-07',
... ).round(2).transpose()
>>> res.index.name = None
>>> res.columns.name = None
>>> res
2018-02-05 2018-02-06 2018-02-07
VIX Index High 38.80 50.30 31.64
Low 16.80 22.42 21.17
Last_Price 37.32 29.98 27.73
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140605', end_date='20140610', adjust='-'
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-05 647.35
2014-06-06 645.57
2014-06-09 93.70
2014-06-10 94.25
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140606', end_date='20140609',
... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-06 645.57
    2014-06-09 93.70 | Below is the instruction that describes the task:
### Input:
Bloomberg historical data
Args:
tickers: ticker(s)
flds: field(s)
start_date: start date
end_date: end date - default today
adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
exact match of above words will adjust for corresponding events
Case 0: `-` no adjustment for dividend or split
Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
Case 2: `adjust` will adjust for splits and ignore all dividends
Case 3: `all` == `dvd|split` == adjust for all
Case 4: None == Bloomberg default OR use kwargs
**kwargs: overrides
Returns:
pd.DataFrame
Examples:
>>> res = bdh(
... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],
... start_date='2018-02-05', end_date='2018-02-07',
... ).round(2).transpose()
>>> res.index.name = None
>>> res.columns.name = None
>>> res
2018-02-05 2018-02-06 2018-02-07
VIX Index High 38.80 50.30 31.64
Low 16.80 22.42 21.17
Last_Price 37.32 29.98 27.73
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140605', end_date='20140610', adjust='-'
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-05 647.35
2014-06-06 645.57
2014-06-09 93.70
2014-06-10 94.25
>>> bdh(
... tickers='AAPL US Equity', flds='Px_Last',
... start_date='20140606', end_date='20140609',
... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,
... ).round(2)
ticker AAPL US Equity
field Px_Last
2014-06-06 645.57
2014-06-09 93.70
### Response:
def bdh(
        tickers, flds=None, start_date=None, end_date='today', adjust=None, **kwargs
) -> pd.DataFrame:
    """
    Bloomberg historical data
    Args:
        tickers: ticker(s)
        flds: field(s)
        start_date: start date
        end_date: end date - default today
        adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
        exact match of above words will adjust for corresponding events
        Case 0: `-` no adjustment for dividend or split
        Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
        Case 2: `adjust` will adjust for splits and ignore all dividends
        Case 3: `all` == `dvd|split` == adjust for all
        Case 4: None == Bloomberg default OR use kwargs
        **kwargs: overrides
    Returns:
        pd.DataFrame
    Examples:
        >>> res = bdh(
        ... tickers='VIX Index', flds=['High', 'Low', 'Last_Price'],
        ... start_date='2018-02-05', end_date='2018-02-07',
        ... ).round(2).transpose()
        >>> res.index.name = None
        >>> res.columns.name = None
        >>> res
        2018-02-05 2018-02-06 2018-02-07
        VIX Index High 38.80 50.30 31.64
        Low 16.80 22.42 21.17
        Last_Price 37.32 29.98 27.73
        >>> bdh(
        ... tickers='AAPL US Equity', flds='Px_Last',
        ... start_date='20140605', end_date='20140610', adjust='-'
        ... ).round(2)
        ticker AAPL US Equity
        field Px_Last
        2014-06-05 647.35
        2014-06-06 645.57
        2014-06-09 93.70
        2014-06-10 94.25
        >>> bdh(
        ... tickers='AAPL US Equity', flds='Px_Last',
        ... start_date='20140606', end_date='20140609',
        ... CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False,
        ... ).round(2)
        ticker AAPL US Equity
        field Px_Last
        2014-06-06 645.57
        2014-06-09 93.70
    """
    logger = logs.get_logger(bdh, level=kwargs.pop('log', logs.LOG_LEVEL))
    # Dividend adjustments
    if isinstance(adjust, str) and adjust:
        if adjust == 'all':
            kwargs['CshAdjNormal'] = True
            kwargs['CshAdjAbnormal'] = True
            kwargs['CapChg'] = True
        else:
            # Substring match: e.g. adjust='dvd|split' enables all three flags,
            # while '-' matches nothing and disables them all (Case 0).
            kwargs['CshAdjNormal'] = 'normal' in adjust or 'dvd' in adjust
            kwargs['CshAdjAbnormal'] = 'abn' in adjust or 'dvd' in adjust
            kwargs['CapChg'] = 'split' in adjust
    con, _ = create_connection()
    # Remaining kwargs are split into request element settings and overrides
    elms = assist.proc_elms(**kwargs)
    ovrds = assist.proc_ovrds(**kwargs)
    # Normalize scalar arguments to lists and apply the field default
    if isinstance(tickers, str): tickers = [tickers]
    if flds is None: flds = ['Last_Price']
    if isinstance(flds, str): flds = [flds]
    e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
    if start_date is None:
        # Default lookback window: three months before end_date
        start_date = pd.Timestamp(e_dt) - relativedelta(months=3)
    s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')
    logger.info(
        f'loading historical data from Bloomberg:\n'
        f'{assist.info_qry(tickers=tickers, flds=flds)}'
    )
    logger.debug(
        f'\nflds={flds}\nelms={elms}\novrds={ovrds}\nstart_date={s_dt}\nend_date={e_dt}'
    )
    res = con.bdh(
        tickers=tickers, flds=flds, elms=elms, ovrds=ovrds, start_date=s_dt, end_date=e_dt
    )
    res.index.name = None
    # With exactly one field, optionally drop the field level from the
    # (ticker, field) column MultiIndex (opt-in via keep_one=True)
    if (len(flds) == 1) and kwargs.get('keep_one', False):
        return res.xs(flds[0], axis=1, level=1)
    return res
def whole_line_styled(p):
"""
Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
line is italics, False otherwise.
"""
r_tags = p.xpath('.//w:r', namespaces=p.nsmap)
tags_are_bold = [
is_bold(r) or is_underlined(r) for r in r_tags
]
tags_are_italics = [
is_italics(r) for r in r_tags
]
return all(tags_are_bold), all(tags_are_italics) | Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
    line is italics, False otherwise. | Below is the instruction that describes the task:
### Input:
Checks to see if the whole p tag will end up being bold or italics. Returns
a tuple (boolean, boolean). The first boolean will be True if the whole
line is bold, False otherwise. The second boolean will be True if the whole
line is italics, False otherwise.
### Response:
def whole_line_styled(p):
    """
    Determine whether the entire paragraph ``p`` renders bold and/or italics.

    Returns a (bold, italics) tuple of booleans: the first is True when every
    run (``w:r``) is bold or underlined, the second when every run is italics.
    An empty paragraph yields (True, True), as with the built-in ``all``.
    """
    runs = p.xpath('.//w:r', namespaces=p.nsmap)
    every_bold = all(is_bold(run) or is_underlined(run) for run in runs)
    every_italic = all(is_italics(run) for run in runs)
    return every_bold, every_italic
def RepairNodeStorageUnits(r, node, storage_type, name):
"""
Repairs a storage unit on the node.
@type node: str
@param node: node whose storage units to repair
@type storage_type: str
@param storage_type: storage type to repair
@type name: str
@param name: name of the storage unit to repair
@rtype: int
@return: job id
"""
query = {
"storage_type": storage_type,
"name": name,
}
return r.request("put", "/2/nodes/%s/storage/repair" % node, query=query) | Repairs a storage unit on the node.
@type node: str
@param node: node whose storage units to repair
@type storage_type: str
@param storage_type: storage type to repair
@type name: str
@param name: name of the storage unit to repair
@rtype: int
    @return: job id | Below is the instruction that describes the task:
### Input:
Repairs a storage unit on the node.
@type node: str
@param node: node whose storage units to repair
@type storage_type: str
@param storage_type: storage type to repair
@type name: str
@param name: name of the storage unit to repair
@rtype: int
@return: job id
### Response:
def RepairNodeStorageUnits(r, node, storage_type, name):
    """
    Trigger a repair of one storage unit on a node via the RAPI client.
    @type node: str
    @param node: node whose storage units to repair
    @type storage_type: str
    @param storage_type: storage type to repair
    @type name: str
    @param name: name of the storage unit to repair
    @rtype: int
    @return: job id
    """
    return r.request("put", "/2/nodes/%s/storage/repair" % node,
                     query={"storage_type": storage_type, "name": name})
def _config_session():
"""
Configure session for particular device
Returns:
tensorflow.Session
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = '0'
return tf.Session(config=config) | Configure session for particular device
Returns:
        tensorflow.Session | Below is the instruction that describes the task:
### Input:
Configure session for particular device
Returns:
tensorflow.Session
### Response:
def _config_session():
    """
    Build a TF session pinned to GPU 0 with on-demand memory growth.
    Returns:
        tensorflow.Session
    """
    gpu_options = tf.GPUOptions(allow_growth=True, visible_device_list='0')
    return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def complete_english(string):
"""
>>> complete_english('dont do this')
"don't do this"
>>> complete_english('doesnt is matched as well')
"doesn't is matched as well"
"""
for x, y in [("dont", "don't"),
("doesnt", "doesn't"),
("wont", "won't"),
("wasnt", "wasn't")]:
string = string.replace(x, y)
return string | >>> complete_english('dont do this')
"don't do this"
>>> complete_english('doesnt is matched as well')
"doesn't is matched as well" | Below is the the instruction that describes the task:
### Input:
>>> complete_english('dont do this')
"don't do this"
>>> complete_english('doesnt is matched as well')
"doesn't is matched as well"
### Response:
def complete_english(string):
    """
    Insert the missing apostrophe into common English contractions.
    >>> complete_english('dont do this')
    "don't do this"
    >>> complete_english('doesnt is matched as well')
    "doesn't is matched as well"
    """
    corrections = {
        "dont": "don't",
        "doesnt": "doesn't",
        "wont": "won't",
        "wasnt": "wasn't",
    }
    for wrong, right in corrections.items():
        string = string.replace(wrong, right)
    return string
def plotRaster (include = ['allCells'], timeRange = None, maxSpikes = 1e8, orderBy = 'gid', orderInverse = False, labels = 'legend', popRates = False,
    spikeHist=None, spikeHistBin=5, syncLines=False, lw=2, marker='|', markerSize=5, popColors=None, figSize=(10, 8), fontSize=12,
    dpi = 100, saveData = None, saveFig = None, showFig = True):
    '''
    Raster plot of network cells
    - include (['all',|'allCells',|'allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to include (default: 'allCells')
    - timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None)
    - maxSpikes (int): maximum number of spikes that will be plotted (default: 1e8)
    - orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order y-axis by, e.g. 'gid', 'ynorm', 'y' (default: 'gid')
    - orderInverse (True|False): Invert the y-axis order (default: False)
    - labels = ('legend', 'overlay'): Show population labels in a legend or overlayed on one side of raster (default: 'legend')
    - popRates = (True|False): Include population rates (default: False)
    - spikeHist (None|'overlay'|'subplot'): overlay line over raster showing spike histogram (spikes/bin) (default: None)
    - spikeHistBin (int): Size of bin in ms to use for histogram (default: 5)
    - syncLines (True|False): calculate synchrony measure and plot vertical lines for each spike to evidence synchrony (default: False)
    - lw (integer): Line width for each spike (default: 2)
    - marker (char): Marker for each spike (default: '|')
    - popColors (odict): Dictionary with color (value) used for each population (key) (default: None)
    - figSize ((width, height)): Size of figure (default: (10,8))
    - dpi (int): Dots per inch to save fig (default: 100)
    - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
        if set to True uses filename from simConfig (default: None)
    - saveFig (None|True|'fileName'): File name where to save the figure (default: None)
        if set to True uses filename from simConfig (default: None)
    - showFig (True|False): Whether to show the figure or not (default: True)
    - Returns figure handle
    '''
    from .. import sim

    print('Plotting raster...')

    # Select cells to include
    cells, cellGids, netStimLabels = getCellsInclude(include)

    df = pd.DataFrame.from_records(cells)
    df = pd.concat([df.drop('tags', axis=1), pd.DataFrame.from_records(df['tags'].tolist())], axis=1)

    keep = ['pop', 'gid', 'conns']
    if isinstance(orderBy, basestring) and orderBy not in cells[0]['tags']:  # if orderBy property doesn't exist or is not numeric, use gid
        orderBy = 'gid'
    elif orderBy == 'pop':  # BUGFIX: was `orderBy is 'pop'` (identity test on a string literal)
        df['popInd'] = df['pop'].astype('category')
        df['popInd'].cat.set_categories(sim.net.pops.keys(), inplace=True)
        orderBy='popInd'
    elif isinstance(orderBy, basestring) and not isinstance(cells[0]['tags'][orderBy], Number):
        orderBy = 'gid'

    if isinstance(orderBy, list):
        if 'pop' in orderBy:
            df['popInd'] = df['pop'].astype('category')
            df['popInd'].cat.set_categories(sim.net.pops.keys(), inplace=True)
            orderBy[orderBy.index('pop')] = 'popInd'
        keep = keep + list(set(orderBy) - set(keep))
    elif orderBy not in keep:
        keep.append(orderBy)
    df = df[keep]

    popLabels = [pop for pop in sim.net.allPops if pop in df['pop'].unique()] #preserves original ordering
    if netStimLabels: popLabels.append('NetStims')
    popColorsTmp = {popLabel: colorList[ipop%len(colorList)] for ipop,popLabel in enumerate(popLabels)} # dict with color for each pop
    if popColors: popColorsTmp.update(popColors)
    popColors = popColorsTmp

    # spkts/spkgids stay empty when no cells are selected (e.g. only NetStims),
    # so the final return statement never hits an undefined name.
    spkgids, spkts = [], []
    if len(cellGids) > 0:
        gidColors = {cell['gid']: popColors[cell['tags']['pop']] for cell in cells} # dict with color for each gid
        try:
            sel, spkts,spkgids = getSpktSpkid(cellGids=cellGids, timeRange=timeRange, allCells=(include == ['allCells']))
        except Exception:  # BUGFIX: was a bare `except:` (also trapped KeyboardInterrupt/SystemExit)
            import sys
            print((sys.exc_info()))
            spkgids, spkts = [], []
            sel = pd.DataFrame(columns=['spkt', 'spkid'])
        sel['spkgidColor'] = sel['spkid'].map(gidColors)
        df['gidColor'] = df['pop'].map(popColors)
        df.set_index('gid', inplace=True)

    # Order by
    if len(df) > 0:
        ylabelText = 'Cells (ordered by %s)'%(orderBy)
        df = df.sort_values(by=orderBy)
        sel['spkind'] = sel['spkid'].apply(df.index.get_loc)
    else:
        sel = pd.DataFrame(columns=['spkt', 'spkid', 'spkind'])
        ylabelText = ''

    # Add NetStim spikes
    numCellSpks = len(sel)
    numNetStims = 0
    for netStimLabel in netStimLabels:
        netStimSpks = [spk for cell,stims in sim.allSimData['stims'].items() \
            for stimLabel,stimSpks in stims.items() for spk in stimSpks if stimLabel == netStimLabel]
        if len(netStimSpks) > 0:
            # lastInd = max(spkinds) if len(spkinds)>0 else 0
            lastInd = sel['spkind'].max() if len(sel['spkind']) > 0 else 0
            spktsNew = netStimSpks
            spkindsNew = [lastInd+1+i for i in range(len(netStimSpks))]
            ns = pd.DataFrame(list(zip(spktsNew, spkindsNew)), columns=['spkt', 'spkind'])
            # BUGFIX: key is 'NetStims' (as appended to popLabels above); 'netStims' raised KeyError
            ns['spkgidColor'] = popColors['NetStims']
            sel = pd.concat([sel, ns])
            numNetStims += 1
        else:
            pass
            #print netStimLabel+' produced no spikes'
    if len(cellGids)>0 and numNetStims:
        ylabelText = ylabelText + ' and NetStims (at the end)'
    elif numNetStims:
        ylabelText = ylabelText + 'NetStims'

    if numCellSpks+numNetStims == 0:
        print('No spikes available to plot raster')
        return None

    # Time Range
    if timeRange == [0,sim.cfg.duration]:
        pass
    elif timeRange is None:
        timeRange = [0,sim.cfg.duration]
    else:
        sel = sel.query('spkt >= @timeRange[0] and spkt <= @timeRange[1]')

    # Limit to maxSpikes
    if (len(sel)>maxSpikes):
        print((' Showing only the first %i out of %i spikes' % (maxSpikes, len(sel)))) # Limit num of spikes
        if numNetStims: # sort first if have netStims
            sel = sel.sort_values(by='spkt')
        # BUGFIX: .iloc requires an integer bound; the default maxSpikes=1e8 is a float
        sel = sel.iloc[:int(maxSpikes)]
        timeRange[1] = sel['spkt'].max()

    # Calculate spike histogram
    if spikeHist:
        histo = np.histogram(sel['spkt'].tolist(), bins = np.arange(timeRange[0], timeRange[1], spikeHistBin))
        histoT = histo[1][:-1]+spikeHistBin/2
        histoCount = histo[0]

    # Plot spikes
    # set font size
    plt.rcParams.update({'font.size': fontSize})

    fig,ax1 = plt.subplots(figsize=figSize)
    fontsiz = fontSize

    if spikeHist == 'subplot':
        gs = gridspec.GridSpec(2, 1,height_ratios=[2,1])
        ax1=plt.subplot(gs[0])

    sel['spkt'] = sel['spkt'].apply(pd.to_numeric)
    sel.plot.scatter(ax=ax1, x='spkt', y='spkind', lw=lw, s=markerSize, marker=marker, c=sel['spkgidColor'].tolist()) # Create raster
    ax1.set_xlim(timeRange)

    # Plot stats
    gidPops = df['pop'].tolist()
    popNumCells = [float(gidPops.count(pop)) for pop in popLabels] if numCellSpks else [0] * len(popLabels)
    totalSpikes = len(sel)
    totalConnections = sum([len(conns) for conns in df['conns']])
    numCells = len(cells)
    firingRate = float(totalSpikes)/(numCells+numNetStims)/(timeRange[1]-timeRange[0])*1e3 if totalSpikes>0 else 0 # Calculate firing rate
    connsPerCell = totalConnections/float(numCells) if numCells>0 else 0 # Calculate the number of connections per cell

    if popRates:
        avgRates = {}
        tsecs = (timeRange[1]-timeRange[0])/1e3
        for i,(pop, popNum) in enumerate(zip(popLabels, popNumCells)):
            if numCells > 0 and pop != 'NetStims':
                if numCellSpks == 0:
                    avgRates[pop] = 0
                else:
                    avgRates[pop] = len([spkid for spkid in sel['spkind'].iloc[:numCellSpks-1] if df['pop'].iloc[int(spkid)]==pop])/popNum/tsecs
        if numNetStims:
            popNumCells[-1] = numNetStims
            avgRates['NetStims'] = len([spkid for spkid in sel['spkind'].iloc[numCellSpks:]])/numNetStims/tsecs

    # Plot synchrony lines
    if syncLines:
        for spkt in sel['spkt'].tolist():
            ax1.plot((spkt, spkt), (0, len(cells)+numNetStims), 'r-', linewidth=0.1)
        plt.title('cells=%i syns/cell=%0.1f rate=%0.1f Hz sync=%0.2f' % (numCells,connsPerCell,firingRate,syncMeasure()), fontsize=fontsiz)
    else:
        plt.title('cells=%i syns/cell=%0.1f rate=%0.1f Hz' % (numCells,connsPerCell,firingRate), fontsize=fontsiz)

    # Axis
    ax1.set_xlabel('Time (ms)', fontsize=fontsiz)
    ax1.set_ylabel(ylabelText, fontsize=fontsiz)
    ax1.set_xlim(timeRange)
    ax1.set_ylim(-1, len(cells)+numNetStims+1)

    # Add legend
    if popRates:
        popLabelRates = [popLabel + ' (%.3g Hz)'%(avgRates[popLabel]) for popLabel in popLabels if popLabel in avgRates]

    if labels == 'legend':
        for ipop,popLabel in enumerate(popLabels):
            label = popLabelRates[ipop] if popRates else popLabel
            plt.plot(0,0,color=popColors[popLabel],label=label)
        plt.legend(fontsize=fontsiz, bbox_to_anchor=(1.04, 1), loc=2, borderaxespad=0.)
        maxLabelLen = max([len(l) for l in popLabels])
        rightOffset = 0.85 if popRates else 0.9
        plt.subplots_adjust(right=(rightOffset-0.012*maxLabelLen))
    elif labels == 'overlay':
        ax = plt.gca()
        tx = 1.01
        margin = 1.0/numCells/2
        minSpacing = float(fontsiz) * 1.1 / float((0.8*figSize[1]*dpi))
        tys = [(float(popLen)/numCells)*(1-2*margin) for popLen in popNumCells]
        tysOffset = list(scipy.cumsum(tys))[:-1]
        tysOffset = [tysOffset[0]] +[tysOffset[i] + max(tysOffset[i+1]-tysOffset[i], minSpacing) for i in range(len(tysOffset)-1)]
        tysOffset.insert(0, 0)
        labels = popLabelRates if popRates else popLabels
        for ipop,(ty, tyOffset, popLabel) in enumerate(zip(tys, tysOffset, popLabels)):
            label = popLabelRates[ipop] if popRates else popLabel
            if orderInverse:
                finalty = 1.0 - (tyOffset + ty/2.0 + 0.01)
            else:
                finalty = tyOffset + ty/2.0 - 0.01
            plt.text(tx, finalty, label, transform=ax.transAxes, fontsize=fontsiz, color=popColors[popLabel])
        maxLabelLen = min(6, max([len(l) for l in labels]))
        plt.subplots_adjust(right=(0.95-0.011*maxLabelLen))

    # Plot spike hist
    if spikeHist == 'overlay':
        ax2 = ax1.twinx()
        ax2.plot (histoT, histoCount, linewidth=0.5)
        ax2.set_ylabel('Spike count', fontsize=fontsiz) # add yaxis label in opposite side
        ax2.set_xlim(timeRange)
    elif spikeHist == 'subplot':
        ax2=plt.subplot(gs[1])
        ax2.plot (histoT, histoCount, linewidth=1.0)
        ax2.set_xlabel('Time (ms)', fontsize=fontsiz)
        ax2.set_ylabel('Spike count', fontsize=fontsiz)
        ax2.set_xlim(timeRange)

    if orderInverse: plt.gca().invert_yaxis()

    # save figure data
    if saveData:
        figData = {'spkTimes': sel['spkt'].tolist(), 'spkInds': sel['spkind'].tolist(), 'spkColors': sel['spkgidColor'].tolist(), 'cellGids': cellGids, 'sortedGids': df.index.tolist(), 'numNetStims': numNetStims,
        'include': include, 'timeRange': timeRange, 'maxSpikes': maxSpikes, 'orderBy': orderBy, 'orderInverse': orderInverse, 'spikeHist': spikeHist,
        'syncLines': syncLines}
        _saveFigData(figData, saveData, 'raster')

    # save figure
    if saveFig:
        if isinstance(saveFig, basestring):
            filename = saveFig
        else:
            filename = sim.cfg.filename+'_'+'raster.png'
        plt.savefig(filename, dpi=dpi)

    # show fig
    if showFig: _showFigure()

    return fig, {'include': include, 'spkts': spkts, 'spkinds': sel['spkind'].tolist(), 'timeRange': timeRange}
- include (['all',|'allCells',|'allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to include (default: 'allCells')
- timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None)
- maxSpikes (int): maximum number of spikes that will be plotted (default: 1e8)
- orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order y-axis by, e.g. 'gid', 'ynorm', 'y' (default: 'gid')
- orderInverse (True|False): Invert the y-axis order (default: False)
- labels = ('legend', 'overlay'): Show population labels in a legend or overlayed on one side of raster (default: 'legend')
- popRates = (True|False): Include population rates (default: False)
- spikeHist (None|'overlay'|'subplot'): overlay line over raster showing spike histogram (spikes/bin) (default: False)
- spikeHistBin (int): Size of bin in ms to use for histogram (default: 5)
    - syncLines (True|False): calculate synchrony measure and plot vertical lines for each spike to evidence synchrony (default: False)
- lw (integer): Line width for each spike (default: 2)
- marker (char): Marker for each spike (default: '|')
- popColors (odict): Dictionary with color (value) used for each population (key) (default: None)
- figSize ((width, height)): Size of figure (default: (10,8))
- dpi (int): Dots per inch to save fig (default: 100)
- saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
if set to True uses filename from simConfig (default: None)
- saveFig (None|True|'fileName'): File name where to save the figure (default: None)
if set to True uses filename from simConfig (default: None)
- showFig (True|False): Whether to show the figure or not (default: True)
- Returns figure handle | Below is the the instruction that describes the task:
### Input:
Raster plot of network cells
- include (['all',|'allCells',|'allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to include (default: 'allCells')
- timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None)
- maxSpikes (int): maximum number of spikes that will be plotted (default: 1e8)
- orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order y-axis by, e.g. 'gid', 'ynorm', 'y' (default: 'gid')
- orderInverse (True|False): Invert the y-axis order (default: False)
- labels = ('legend', 'overlay'): Show population labels in a legend or overlayed on one side of raster (default: 'legend')
- popRates = (True|False): Include population rates (default: False)
- spikeHist (None|'overlay'|'subplot'): overlay line over raster showing spike histogram (spikes/bin) (default: False)
- spikeHistBin (int): Size of bin in ms to use for histogram (default: 5)
    - syncLines (True|False): calculate synchrony measure and plot vertical lines for each spike to evidence synchrony (default: False)
- lw (integer): Line width for each spike (default: 2)
- marker (char): Marker for each spike (default: '|')
- popColors (odict): Dictionary with color (value) used for each population (key) (default: None)
- figSize ((width, height)): Size of figure (default: (10,8))
- dpi (int): Dots per inch to save fig (default: 100)
- saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
if set to True uses filename from simConfig (default: None)
- saveFig (None|True|'fileName'): File name where to save the figure (default: None)
if set to True uses filename from simConfig (default: None)
- showFig (True|False): Whether to show the figure or not (default: True)
- Returns figure handle
### Response:
def plotRaster (include = ['allCells'], timeRange = None, maxSpikes = 1e8, orderBy = 'gid', orderInverse = False, labels = 'legend', popRates = False,
spikeHist=None, spikeHistBin=5, syncLines=False, lw=2, marker='|', markerSize=5, popColors=None, figSize=(10, 8), fontSize=12,
dpi = 100, saveData = None, saveFig = None, showFig = True):
'''
Raster plot of network cells
- include (['all',|'allCells',|'allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to include (default: 'allCells')
- timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None)
- maxSpikes (int): maximum number of spikes that will be plotted (default: 1e8)
- orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order y-axis by, e.g. 'gid', 'ynorm', 'y' (default: 'gid')
- orderInverse (True|False): Invert the y-axis order (default: False)
- labels = ('legend', 'overlay'): Show population labels in a legend or overlayed on one side of raster (default: 'legend')
- popRates = (True|False): Include population rates (default: False)
- spikeHist (None|'overlay'|'subplot'): overlay line over raster showing spike histogram (spikes/bin) (default: False)
- spikeHistBin (int): Size of bin in ms to use for histogram (default: 5)
    - syncLines (True|False): calculate synchrony measure and plot vertical lines for each spike to evidence synchrony (default: False)
- lw (integer): Line width for each spike (default: 2)
- marker (char): Marker for each spike (default: '|')
- popColors (odict): Dictionary with color (value) used for each population (key) (default: None)
- figSize ((width, height)): Size of figure (default: (10,8))
- dpi (int): Dots per inch to save fig (default: 100)
- saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
if set to True uses filename from simConfig (default: None)
- saveFig (None|True|'fileName'): File name where to save the figure (default: None)
if set to True uses filename from simConfig (default: None)
- showFig (True|False): Whether to show the figure or not (default: True)
- Returns figure handle
'''
from .. import sim
print('Plotting raster...')
# Select cells to include
cells, cellGids, netStimLabels = getCellsInclude(include)
df = pd.DataFrame.from_records(cells)
df = pd.concat([df.drop('tags', axis=1), pd.DataFrame.from_records(df['tags'].tolist())], axis=1)
keep = ['pop', 'gid', 'conns']
if isinstance(orderBy, basestring) and orderBy not in cells[0]['tags']: # if orderBy property doesn't exist or is not numeric, use gid
orderBy = 'gid'
elif orderBy is 'pop':
df['popInd'] = df['pop'].astype('category')
df['popInd'].cat.set_categories(sim.net.pops.keys(), inplace=True)
orderBy='popInd'
elif isinstance(orderBy, basestring) and not isinstance(cells[0]['tags'][orderBy], Number):
orderBy = 'gid'
if isinstance(orderBy, list):
if 'pop' in orderBy:
df['popInd'] = df['pop'].astype('category')
df['popInd'].cat.set_categories(sim.net.pops.keys(), inplace=True)
orderBy[orderBy.index('pop')] = 'popInd'
keep = keep + list(set(orderBy) - set(keep))
elif orderBy not in keep:
keep.append(orderBy)
df = df[keep]
popLabels = [pop for pop in sim.net.allPops if pop in df['pop'].unique()] #preserves original ordering
if netStimLabels: popLabels.append('NetStims')
popColorsTmp = {popLabel: colorList[ipop%len(colorList)] for ipop,popLabel in enumerate(popLabels)} # dict with color for each pop
if popColors: popColorsTmp.update(popColors)
popColors = popColorsTmp
if len(cellGids) > 0:
gidColors = {cell['gid']: popColors[cell['tags']['pop']] for cell in cells} # dict with color for each gid
try:
sel, spkts,spkgids = getSpktSpkid(cellGids=cellGids, timeRange=timeRange, allCells=(include == ['allCells']))
except:
import sys
print((sys.exc_info()))
spkgids, spkts = [], []
sel = pd.DataFrame(columns=['spkt', 'spkid'])
sel['spkgidColor'] = sel['spkid'].map(gidColors)
df['gidColor'] = df['pop'].map(popColors)
df.set_index('gid', inplace=True)
# Order by
if len(df) > 0:
ylabelText = 'Cells (ordered by %s)'%(orderBy)
df = df.sort_values(by=orderBy)
sel['spkind'] = sel['spkid'].apply(df.index.get_loc)
else:
sel = pd.DataFrame(columns=['spkt', 'spkid', 'spkind'])
ylabelText = ''
# Add NetStim spikes
numCellSpks = len(sel)
numNetStims = 0
for netStimLabel in netStimLabels:
netStimSpks = [spk for cell,stims in sim.allSimData['stims'].items() \
for stimLabel,stimSpks in stims.items() for spk in stimSpks if stimLabel == netStimLabel]
if len(netStimSpks) > 0:
# lastInd = max(spkinds) if len(spkinds)>0 else 0
lastInd = sel['spkind'].max() if len(sel['spkind']) > 0 else 0
spktsNew = netStimSpks
spkindsNew = [lastInd+1+i for i in range(len(netStimSpks))]
ns = pd.DataFrame(list(zip(spktsNew, spkindsNew)), columns=['spkt', 'spkind'])
ns['spkgidColor'] = popColors['netStims']
sel = pd.concat([sel, ns])
numNetStims += 1
else:
pass
#print netStimLabel+' produced no spikes'
if len(cellGids)>0 and numNetStims:
ylabelText = ylabelText + ' and NetStims (at the end)'
elif numNetStims:
ylabelText = ylabelText + 'NetStims'
if numCellSpks+numNetStims == 0:
print('No spikes available to plot raster')
return None
# Time Range
if timeRange == [0,sim.cfg.duration]:
pass
elif timeRange is None:
timeRange = [0,sim.cfg.duration]
else:
sel = sel.query('spkt >= @timeRange[0] and spkt <= @timeRange[1]')
# Limit to maxSpikes
if (len(sel)>maxSpikes):
print((' Showing only the first %i out of %i spikes' % (maxSpikes, len(sel)))) # Limit num of spikes
if numNetStims: # sort first if have netStims
sel = sel.sort_values(by='spkt')
sel = sel.iloc[:maxSpikes]
timeRange[1] = sel['spkt'].max()
# Calculate spike histogram
if spikeHist:
histo = np.histogram(sel['spkt'].tolist(), bins = np.arange(timeRange[0], timeRange[1], spikeHistBin))
histoT = histo[1][:-1]+spikeHistBin/2
histoCount = histo[0]
# Plot spikes
# set font size
plt.rcParams.update({'font.size': fontSize})
fig,ax1 = plt.subplots(figsize=figSize)
fontsiz = fontSize
if spikeHist == 'subplot':
gs = gridspec.GridSpec(2, 1,height_ratios=[2,1])
ax1=plt.subplot(gs[0])
sel['spkt'] = sel['spkt'].apply(pd.to_numeric)
sel.plot.scatter(ax=ax1, x='spkt', y='spkind', lw=lw, s=markerSize, marker=marker, c=sel['spkgidColor'].tolist()) # Create raster
ax1.set_xlim(timeRange)
# Plot stats
gidPops = df['pop'].tolist()
popNumCells = [float(gidPops.count(pop)) for pop in popLabels] if numCellSpks else [0] * len(popLabels)
totalSpikes = len(sel)
totalConnections = sum([len(conns) for conns in df['conns']])
numCells = len(cells)
firingRate = float(totalSpikes)/(numCells+numNetStims)/(timeRange[1]-timeRange[0])*1e3 if totalSpikes>0 else 0 # Calculate firing rate
connsPerCell = totalConnections/float(numCells) if numCells>0 else 0 # Calculate the number of connections per cell
if popRates:
avgRates = {}
tsecs = (timeRange[1]-timeRange[0])/1e3
for i,(pop, popNum) in enumerate(zip(popLabels, popNumCells)):
if numCells > 0 and pop != 'NetStims':
if numCellSpks == 0:
avgRates[pop] = 0
else:
avgRates[pop] = len([spkid for spkid in sel['spkind'].iloc[:numCellSpks-1] if df['pop'].iloc[int(spkid)]==pop])/popNum/tsecs
if numNetStims:
popNumCells[-1] = numNetStims
avgRates['NetStims'] = len([spkid for spkid in sel['spkind'].iloc[numCellSpks:]])/numNetStims/tsecs
# Plot synchrony lines
if syncLines:
for spkt in sel['spkt'].tolist():
ax1.plot((spkt, spkt), (0, len(cells)+numNetStims), 'r-', linewidth=0.1)
plt.title('cells=%i syns/cell=%0.1f rate=%0.1f Hz sync=%0.2f' % (numCells,connsPerCell,firingRate,syncMeasure()), fontsize=fontsiz)
else:
plt.title('cells=%i syns/cell=%0.1f rate=%0.1f Hz' % (numCells,connsPerCell,firingRate), fontsize=fontsiz)
# Axis
ax1.set_xlabel('Time (ms)', fontsize=fontsiz)
ax1.set_ylabel(ylabelText, fontsize=fontsiz)
ax1.set_xlim(timeRange)
ax1.set_ylim(-1, len(cells)+numNetStims+1)
# Add legend
if popRates:
popLabelRates = [popLabel + ' (%.3g Hz)'%(avgRates[popLabel]) for popLabel in popLabels if popLabel in avgRates]
if labels == 'legend':
for ipop,popLabel in enumerate(popLabels):
label = popLabelRates[ipop] if popRates else popLabel
plt.plot(0,0,color=popColors[popLabel],label=label)
plt.legend(fontsize=fontsiz, bbox_to_anchor=(1.04, 1), loc=2, borderaxespad=0.)
maxLabelLen = max([len(l) for l in popLabels])
rightOffset = 0.85 if popRates else 0.9
plt.subplots_adjust(right=(rightOffset-0.012*maxLabelLen))
elif labels == 'overlay':
ax = plt.gca()
tx = 1.01
margin = 1.0/numCells/2
minSpacing = float(fontsiz) * 1.1 / float((0.8*figSize[1]*dpi))
tys = [(float(popLen)/numCells)*(1-2*margin) for popLen in popNumCells]
tysOffset = list(scipy.cumsum(tys))[:-1]
tysOffset = [tysOffset[0]] +[tysOffset[i] + max(tysOffset[i+1]-tysOffset[i], minSpacing) for i in range(len(tysOffset)-1)]
tysOffset.insert(0, 0)
labels = popLabelRates if popRates else popLabels
for ipop,(ty, tyOffset, popLabel) in enumerate(zip(tys, tysOffset, popLabels)):
label = popLabelRates[ipop] if popRates else popLabel
if orderInverse:
finalty = 1.0 - (tyOffset + ty/2.0 + 0.01)
else:
finalty = tyOffset + ty/2.0 - 0.01
plt.text(tx, finalty, label, transform=ax.transAxes, fontsize=fontsiz, color=popColors[popLabel])
maxLabelLen = min(6, max([len(l) for l in labels]))
plt.subplots_adjust(right=(0.95-0.011*maxLabelLen))
# Plot spike hist
if spikeHist == 'overlay':
ax2 = ax1.twinx()
ax2.plot (histoT, histoCount, linewidth=0.5)
ax2.set_ylabel('Spike count', fontsize=fontsiz) # add yaxis label in opposite side
ax2.set_xlim(timeRange)
elif spikeHist == 'subplot':
ax2=plt.subplot(gs[1])
ax2.plot (histoT, histoCount, linewidth=1.0)
ax2.set_xlabel('Time (ms)', fontsize=fontsiz)
ax2.set_ylabel('Spike count', fontsize=fontsiz)
ax2.set_xlim(timeRange)
if orderInverse: plt.gca().invert_yaxis()
# save figure data
if saveData:
figData = {'spkTimes': sel['spkt'].tolist(), 'spkInds': sel['spkind'].tolist(), 'spkColors': sel['spkgidColor'].tolist(), 'cellGids': cellGids, 'sortedGids': df.index.tolist(), 'numNetStims': numNetStims,
'include': include, 'timeRange': timeRange, 'maxSpikes': maxSpikes, 'orderBy': orderBy, 'orderInverse': orderInverse, 'spikeHist': spikeHist,
'syncLines': syncLines}
_saveFigData(figData, saveData, 'raster')
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'raster.png'
plt.savefig(filename, dpi=dpi)
# show fig
if showFig: _showFigure()
return fig, {'include': include, 'spkts': spkts, 'spkinds': sel['spkind'].tolist(), 'timeRange': timeRange} |
def increment(key, delta=1, host=DEFAULT_HOST, port=DEFAULT_PORT):
    '''
    Increment the value of a key

    CLI Example:

    .. code-block:: bash

        salt '*' memcached.increment <key>
        salt '*' memcached.increment <key> 2
    '''
    conn = _connect(host, port)
    _check_stats(conn)
    # BUGFIX: query the same server that will be incremented; the old code
    # called get(key) and so always read from the default host/port.
    cur = get(key, host, port)
    if cur is None:
        raise CommandExecutionError('Key \'{0}\' does not exist'.format(key))
    elif not isinstance(cur, six.integer_types):
        raise CommandExecutionError(
            'Value for key \'{0}\' must be an integer to be '
            'incremented'.format(key)
        )
    try:
        # memcached's incr raises ValueError when delta is not an integer
        return conn.incr(key, delta)
    except ValueError:
        raise SaltInvocationError('Delta value must be an integer')
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2 | Below is the the instruction that describes the task:
### Input:
Increment the value of a key
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2
### Response:
def increment(key, delta=1, host=DEFAULT_HOST, port=DEFAULT_PORT):
'''
Increment the value of a key
CLI Example:
.. code-block:: bash
salt '*' memcached.increment <key>
salt '*' memcached.increment <key> 2
'''
conn = _connect(host, port)
_check_stats(conn)
cur = get(key)
if cur is None:
raise CommandExecutionError('Key \'{0}\' does not exist'.format(key))
elif not isinstance(cur, six.integer_types):
raise CommandExecutionError(
'Value for key \'{0}\' must be an integer to be '
'incremented'.format(key)
)
try:
return conn.incr(key, delta)
except ValueError:
raise SaltInvocationError('Delta value must be an integer') |
def get_object_references(tb, source, max_string_length=1000):
    """
    Find the values of referenced attributes of objects within the traceback scope.
    :param tb: traceback
    :param source: source text scanned line by line for attribute references
    :param max_string_length: truncation limit applied when formatting each value
    :return: list of tuples containing (variable name, value)
    """
    # Collect every attribute reference matched by obj_ref_regex anywhere
    # in the source, deduplicated across lines.
    names = {match
             for line in source.split('\n')
             for match in re.findall(obj_ref_regex, line)}
    info = []
    for name in sorted(names):
        value = string_variable_lookup(tb, name)
        # string_variable_lookup signals "not resolvable" by returning the
        # ValueError class itself, hence the identity comparison.
        if value is ValueError:
            continue
        info.append((name, format_reference(value, max_string_length=max_string_length)))
    return info
:param tb: traceback
:return: list of tuples containing (variable name, value) | Below is the the instruction that describes the task:
### Input:
Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
### Response:
def get_object_references(tb, source, max_string_length=1000):
"""
Find the values of referenced attributes of objects within the traceback scope.
:param tb: traceback
:return: list of tuples containing (variable name, value)
"""
global obj_ref_regex
referenced_attr = set()
for line in source.split('\n'):
referenced_attr.update(set(re.findall(obj_ref_regex, line)))
referenced_attr = sorted(referenced_attr)
info = []
for attr in referenced_attr:
v = string_variable_lookup(tb, attr)
if v is not ValueError:
ref_string = format_reference(v, max_string_length=max_string_length)
info.append((attr, ref_string))
return info |
def transform(self, Z):
    """Apply each transformer to X and horizontally stack the results.

    Parameters
    ----------
    Z : ArrayRDD or DictRDD
        Distributed input data. For a DictRDD only the 'X' column is
        transformed; the 'y' column is carried through unchanged.

    Returns
    -------
    Z_t : ArrayRDD or DictRDD
        Block-wise horizontal stack of the transformers' outputs
        (``scipy.sparse.hstack`` when any output block is sparse,
        ``np.hstack`` otherwise). A DictRDD input yields a DictRDD with
        the same columns, dtype and block size.
    """
    if isinstance(Z, DictRDD):
        X = Z[:, 'X']
    else:
        X = Z
    # One transformed RDD per (name, transformer) pair, each optionally
    # weighted via self.transformer_weights.
    Zs = [_transform_one(trans, name, X, self.transformer_weights)
          for name, trans in self.transformer_list]
    # Zip the per-transformer RDDs so corresponding blocks line up, then
    # flatten the nested zip tuples into one flat tuple of blocks.
    X_rdd = reduce(lambda x, y: x.zip(y._rdd), Zs)
    X_rdd = X_rdd.map(flatten)
    # Pick the stacking function by inspecting the first record: switch to
    # scipy's sparse hstack as soon as any block is sparse.
    mapper = np.hstack
    for item in X_rdd.first():
        if sp.issparse(item):
            mapper = sp.hstack
    X_rdd = X_rdd.map(lambda x: mapper(x))
    if isinstance(Z, DictRDD):
        # Reattach the untouched 'y' column, preserving DictRDD metadata.
        return DictRDD([X_rdd, Z[:, 'y']],
                       columns=Z.columns,
                       dtype=Z.dtype,
                       bsize=Z.bsize)
    else:
        return X_rdd
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. | Below is the the instruction that describes the task:
### Input:
TODO: rewrite docstring
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
### Response:
def transform(self, Z):
"""TODO: rewrite docstring
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
if isinstance(Z, DictRDD):
X = Z[:, 'X']
else:
X = Z
Zs = [_transform_one(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list]
X_rdd = reduce(lambda x, y: x.zip(y._rdd), Zs)
X_rdd = X_rdd.map(flatten)
mapper = np.hstack
for item in X_rdd.first():
if sp.issparse(item):
mapper = sp.hstack
X_rdd = X_rdd.map(lambda x: mapper(x))
if isinstance(Z, DictRDD):
return DictRDD([X_rdd, Z[:, 'y']],
columns=Z.columns,
dtype=Z.dtype,
bsize=Z.bsize)
else:
return X_rdd |
def get_orgs(self):
    """
    :calls: `GET /user/orgs <http://developer.github.com/v3/orgs>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
    """
    # Results are fetched lazily, one page per request, as the returned
    # list is iterated.
    url = "/user/orgs"
    return github.PaginatedList.PaginatedList(
        github.Organization.Organization,
        self._requester,
        url,
        None
    )
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization` | Below is the the instruction that describes the task:
### Input:
:calls: `GET /user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
### Response:
def get_orgs(self):
"""
:calls: `GET /user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
"""
return github.PaginatedList.PaginatedList(
github.Organization.Organization,
self._requester,
"/user/orgs",
None
) |
def deactivate_(self):
    """Run the pre-deactivation hook, then reset shmem state to inactive.
    """
    self.preDeactivate_()
    self.active = False
    # Clear the shmem bookkeeping attributes back to None.
    for attr in ("image_dimensions", "client"):
        setattr(self, attr, None)
### Input:
Init shmem variables to None
### Response:
def deactivate_(self):
"""Init shmem variables to None
"""
self.preDeactivate_()
self.active = False
self.image_dimensions = None
self.client = None |
def _claskey(obj, style):
'''Wrap an old- or new-style class object.
'''
i = id(obj)
k = _claskeys.get(i, None)
if not k:
_claskeys[i] = k = _Claskey(obj, style)
return k | Wrap an old- or new-style class object. | Below is the the instruction that describes the task:
### Input:
Wrap an old- or new-style class object.
### Response:
def _claskey(obj, style):
'''Wrap an old- or new-style class object.
'''
i = id(obj)
k = _claskeys.get(i, None)
if not k:
_claskeys[i] = k = _Claskey(obj, style)
return k |
def lookup(self, short_url):
'''
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance)
'''
if short_url is None or not isinstance(short_url, basestring) or len(short_url.strip()) == 0:
raise GDMalformedURLError('The shortened URL must be a non empty string')
# Build data for porst
data = {
'format': 'json',
'shorturl': short_url
}
opener = urllib2.build_opener()
headers = { 'User-Agent' : self._user_agent }
req = urllib2.Request("{0}/forward.php".format(self.shortener_url), urllib.urlencode(data), headers)
f_desc = opener.open(req, timeout = self._timeout)
response = json.loads(f_desc.read())
if 'url' in response:
# Success!
return HTMLParser.HTMLParser().unescape(urllib.unquote(response['url']))
else:
# Error
error_code = int(response['errorcode'])
error_description = str(response['errormessage'])
if error_code == 1:
raise GDMalformedURLError(error_description)
if error_code == 2:
raise GDShortURLError(error_description)
if error_code == 3:
raise GDRateLimitError(error_description)
if error_code == 4:
raise GDGenericError(error_description) | Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance) | Below is the the instruction that describes the task:
### Input:
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance)
### Response:
def lookup(self, short_url):
'''
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance)
'''
if short_url is None or not isinstance(short_url, basestring) or len(short_url.strip()) == 0:
raise GDMalformedURLError('The shortened URL must be a non empty string')
# Build data for porst
data = {
'format': 'json',
'shorturl': short_url
}
opener = urllib2.build_opener()
headers = { 'User-Agent' : self._user_agent }
req = urllib2.Request("{0}/forward.php".format(self.shortener_url), urllib.urlencode(data), headers)
f_desc = opener.open(req, timeout = self._timeout)
response = json.loads(f_desc.read())
if 'url' in response:
# Success!
return HTMLParser.HTMLParser().unescape(urllib.unquote(response['url']))
else:
# Error
error_code = int(response['errorcode'])
error_description = str(response['errormessage'])
if error_code == 1:
raise GDMalformedURLError(error_description)
if error_code == 2:
raise GDShortURLError(error_description)
if error_code == 3:
raise GDRateLimitError(error_description)
if error_code == 4:
raise GDGenericError(error_description) |
def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):
"""Plot an ARIMA's diagnostics.
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residulas, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
pmdarima.utils.visualization.plot_acf
References
----------
.. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501
"""
# implicitly checks whether installed, and does our backend magic:
_get_plt()
# We originally delegated down to SARIMAX model wrapper, but
# statsmodels makes it difficult to trust their API, so we just re-
# implemented a common method for all results wrappers.
from statsmodels.graphics.utils import create_mpl_fig
fig = create_mpl_fig(fig, figsize)
res_wpr = self.arima_res_
data = res_wpr.data
# Eliminate residuals associated with burned or diffuse likelihoods.
# The statsmodels code for the Kalman Filter takes the loglik_burn
# as a parameter:
# loglikelihood_burn : int, optional
# The number of initial periods during which the loglikelihood is
# not recorded. Default is 0.
# If the class has it, it's a SARIMAX and we'll use it. Otherwise we
# will just access the residuals as we normally would...
if hasattr(res_wpr, 'loglikelihood_burn'):
# This is introduced in the bleeding edge version, but is not
# backwards compatible with 0.9.0 and less:
d = res_wpr.loglikelihood_burn
if hasattr(res_wpr, 'nobs_diffuse'):
d = np.maximum(d, res_wpr.nobs_diffuse)
resid = res_wpr.filter_results\
.standardized_forecasts_error[variable, d:]
else:
# This gets the residuals, but they need to be standardized
d = 0
r = res_wpr.resid
resid = (r - np.nanmean(r)) / np.nanstd(r)
# Top-left: residuals vs time
ax = fig.add_subplot(221)
if hasattr(data, 'dates') and data.dates is not None:
x = data.dates[d:]._mpl_repr()
else:
x = np.arange(len(resid))
ax.plot(x, resid)
ax.hlines(0, x[0], x[-1], alpha=0.5)
ax.set_xlim(x[0], x[-1])
ax.set_title('Standardized residual')
# Top-right: histogram, Gaussian kernel density, Normal density
# Can only do histogram and Gaussian kernel density on the non-null
# elements
resid_nonmissing = resid[~(np.isnan(resid))]
ax = fig.add_subplot(222)
# temporarily disable Deprecation warning, normed -> density
# hist needs to use `density` in future when minimum matplotlib has it
with warnings.catch_warnings(record=True):
ax.hist(resid_nonmissing, normed=True, label='Hist')
kde = gaussian_kde(resid_nonmissing)
xlim = (-1.96 * 2, 1.96 * 2)
x = np.linspace(xlim[0], xlim[1])
ax.plot(x, kde(x), label='KDE')
ax.plot(x, norm.pdf(x), label='N(0,1)')
ax.set_xlim(xlim)
ax.legend()
ax.set_title('Histogram plus estimated density')
# Bottom-left: QQ plot
ax = fig.add_subplot(223)
from statsmodels.graphics.gofplots import qqplot
qqplot(resid_nonmissing, line='s', ax=ax)
ax.set_title('Normal Q-Q')
# Bottom-right: Correlogram
ax = fig.add_subplot(224)
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(resid, ax=ax, lags=lags)
ax.set_title('Correlogram')
ax.set_ylim(-1, 1)
return fig | Plot an ARIMA's diagnostics.
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residulas, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
pmdarima.utils.visualization.plot_acf
References
----------
.. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501 | Below is the the instruction that describes the task:
### Input:
Plot an ARIMA's diagnostics.
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residulas, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
pmdarima.utils.visualization.plot_acf
References
----------
.. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501
### Response:
def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):
"""Plot an ARIMA's diagnostics.
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residulas, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
pmdarima.utils.visualization.plot_acf
References
----------
.. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501
"""
# implicitly checks whether installed, and does our backend magic:
_get_plt()
# We originally delegated down to SARIMAX model wrapper, but
# statsmodels makes it difficult to trust their API, so we just re-
# implemented a common method for all results wrappers.
from statsmodels.graphics.utils import create_mpl_fig
fig = create_mpl_fig(fig, figsize)
res_wpr = self.arima_res_
data = res_wpr.data
# Eliminate residuals associated with burned or diffuse likelihoods.
# The statsmodels code for the Kalman Filter takes the loglik_burn
# as a parameter:
# loglikelihood_burn : int, optional
# The number of initial periods during which the loglikelihood is
# not recorded. Default is 0.
# If the class has it, it's a SARIMAX and we'll use it. Otherwise we
# will just access the residuals as we normally would...
if hasattr(res_wpr, 'loglikelihood_burn'):
# This is introduced in the bleeding edge version, but is not
# backwards compatible with 0.9.0 and less:
d = res_wpr.loglikelihood_burn
if hasattr(res_wpr, 'nobs_diffuse'):
d = np.maximum(d, res_wpr.nobs_diffuse)
resid = res_wpr.filter_results\
.standardized_forecasts_error[variable, d:]
else:
# This gets the residuals, but they need to be standardized
d = 0
r = res_wpr.resid
resid = (r - np.nanmean(r)) / np.nanstd(r)
# Top-left: residuals vs time
ax = fig.add_subplot(221)
if hasattr(data, 'dates') and data.dates is not None:
x = data.dates[d:]._mpl_repr()
else:
x = np.arange(len(resid))
ax.plot(x, resid)
ax.hlines(0, x[0], x[-1], alpha=0.5)
ax.set_xlim(x[0], x[-1])
ax.set_title('Standardized residual')
# Top-right: histogram, Gaussian kernel density, Normal density
# Can only do histogram and Gaussian kernel density on the non-null
# elements
resid_nonmissing = resid[~(np.isnan(resid))]
ax = fig.add_subplot(222)
# temporarily disable Deprecation warning, normed -> density
# hist needs to use `density` in future when minimum matplotlib has it
with warnings.catch_warnings(record=True):
ax.hist(resid_nonmissing, normed=True, label='Hist')
kde = gaussian_kde(resid_nonmissing)
xlim = (-1.96 * 2, 1.96 * 2)
x = np.linspace(xlim[0], xlim[1])
ax.plot(x, kde(x), label='KDE')
ax.plot(x, norm.pdf(x), label='N(0,1)')
ax.set_xlim(xlim)
ax.legend()
ax.set_title('Histogram plus estimated density')
# Bottom-left: QQ plot
ax = fig.add_subplot(223)
from statsmodels.graphics.gofplots import qqplot
qqplot(resid_nonmissing, line='s', ax=ax)
ax.set_title('Normal Q-Q')
# Bottom-right: Correlogram
ax = fig.add_subplot(224)
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(resid, ax=ax, lags=lags)
ax.set_title('Correlogram')
ax.set_ylim(-1, 1)
return fig |
def search(
self,
search_space,
valid_data,
init_args=[],
train_args=[],
init_kwargs={},
train_kwargs={},
module_args={},
module_kwargs={},
max_search=None,
shuffle=True,
verbose=True,
**score_kwargs,
):
"""
Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop).
"""
raise NotImplementedError() | Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop). | Below is the the instruction that describes the task:
### Input:
Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop).
### Response:
def search(
self,
search_space,
valid_data,
init_args=[],
train_args=[],
init_kwargs={},
train_kwargs={},
module_args={},
module_kwargs={},
max_search=None,
shuffle=True,
verbose=True,
**score_kwargs,
):
"""
Args:
search_space: see config_generator() documentation
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
init_args: (list) positional args for initializing the model
train_args: (list) positional args for training the model
init_kwargs: (dict) keyword args for initializing the model
train_kwargs: (dict) keyword args for training the model
module_args: (dict) Dictionary of lists of module args
module_kwargs: (dict) Dictionary of dictionaries of module kwargs
max_search: see config_generator() documentation
shuffle: see config_generator() documentation
Returns:
best_model: the highest performing trained model
Note: Initialization is performed by ModelTuner instead of passing a
pre-initialized model so that tuning may be performed over all model
parameters, including the network architecture (which is defined before
the train loop).
"""
raise NotImplementedError() |
def quickvolshow(
data,
lighting=False,
data_min=None,
data_max=None,
max_shape=256,
level=[0.1, 0.5, 0.9],
opacity=[0.01, 0.05, 0.1],
level_width=0.1,
extent=None,
memorder='C',
**kwargs
):
"""Visualize a 3d array using volume rendering.
:param data: 3d numpy array
:param lighting: boolean, to use lighting or not, if set to false, lighting parameters will be overriden
:param data_min: minimum value to consider for data, if None, computed using np.nanmin
:param data_max: maximum value to consider for data, if None, computed using np.nanmax
:param int max_shape: maximum shape for the 3d cube, if larger, the data is reduced by skipping/slicing (data[::N]),
set to None to disable.
:param extent: list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values that define the bounds of the volume,
otherwise the viewport is used
:param level: level(s) for the where the opacity in the volume peaks, maximum sequence of length 3
:param opacity: opacity(ies) for each level, scalar or sequence of max length 3
:param level_width: width of the (gaussian) bumps where the opacity peaks, scalar or sequence of max length 3
:param kwargs: extra argument passed to Volume and default transfer function
:return:
"""
ipv.figure()
ipv.volshow(
data,
lighting=lighting,
data_min=data_min,
data_max=data_max,
max_shape=max_shape,
level=level,
opacity=opacity,
level_width=level_width,
extent=extent,
memorder=memorder,
**kwargs
)
return ipv.gcc() | Visualize a 3d array using volume rendering.
:param data: 3d numpy array
:param lighting: boolean, to use lighting or not, if set to false, lighting parameters will be overriden
:param data_min: minimum value to consider for data, if None, computed using np.nanmin
:param data_max: maximum value to consider for data, if None, computed using np.nanmax
:param int max_shape: maximum shape for the 3d cube, if larger, the data is reduced by skipping/slicing (data[::N]),
set to None to disable.
:param extent: list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values that define the bounds of the volume,
otherwise the viewport is used
:param level: level(s) for the where the opacity in the volume peaks, maximum sequence of length 3
:param opacity: opacity(ies) for each level, scalar or sequence of max length 3
:param level_width: width of the (gaussian) bumps where the opacity peaks, scalar or sequence of max length 3
:param kwargs: extra argument passed to Volume and default transfer function
:return: | Below is the the instruction that describes the task:
### Input:
Visualize a 3d array using volume rendering.
:param data: 3d numpy array
:param lighting: boolean, to use lighting or not, if set to false, lighting parameters will be overriden
:param data_min: minimum value to consider for data, if None, computed using np.nanmin
:param data_max: maximum value to consider for data, if None, computed using np.nanmax
:param int max_shape: maximum shape for the 3d cube, if larger, the data is reduced by skipping/slicing (data[::N]),
set to None to disable.
:param extent: list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values that define the bounds of the volume,
otherwise the viewport is used
:param level: level(s) for the where the opacity in the volume peaks, maximum sequence of length 3
:param opacity: opacity(ies) for each level, scalar or sequence of max length 3
:param level_width: width of the (gaussian) bumps where the opacity peaks, scalar or sequence of max length 3
:param kwargs: extra argument passed to Volume and default transfer function
:return:
### Response:
def quickvolshow(
data,
lighting=False,
data_min=None,
data_max=None,
max_shape=256,
level=[0.1, 0.5, 0.9],
opacity=[0.01, 0.05, 0.1],
level_width=0.1,
extent=None,
memorder='C',
**kwargs
):
"""Visualize a 3d array using volume rendering.
:param data: 3d numpy array
:param lighting: boolean, to use lighting or not, if set to false, lighting parameters will be overriden
:param data_min: minimum value to consider for data, if None, computed using np.nanmin
:param data_max: maximum value to consider for data, if None, computed using np.nanmax
:param int max_shape: maximum shape for the 3d cube, if larger, the data is reduced by skipping/slicing (data[::N]),
set to None to disable.
:param extent: list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values that define the bounds of the volume,
otherwise the viewport is used
:param level: level(s) for the where the opacity in the volume peaks, maximum sequence of length 3
:param opacity: opacity(ies) for each level, scalar or sequence of max length 3
:param level_width: width of the (gaussian) bumps where the opacity peaks, scalar or sequence of max length 3
:param kwargs: extra argument passed to Volume and default transfer function
:return:
"""
ipv.figure()
ipv.volshow(
data,
lighting=lighting,
data_min=data_min,
data_max=data_max,
max_shape=max_shape,
level=level,
opacity=opacity,
level_width=level_width,
extent=extent,
memorder=memorder,
**kwargs
)
return ipv.gcc() |
def hide_routemap_holder_route_map_content_set_ipv6_next_vrf_next_vrf_list_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
ipv6 = ET.SubElement(set, "ipv6")
next_vrf = ET.SubElement(ipv6, "next-vrf")
next_vrf_list = ET.SubElement(next_vrf, "next-vrf-list")
next_hop_key = ET.SubElement(next_vrf_list, "next-hop")
next_hop_key.text = kwargs.pop('next_hop')
vrf = ET.SubElement(next_vrf_list, "vrf")
vrf.text = kwargs.pop('vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hide_routemap_holder_route_map_content_set_ipv6_next_vrf_next_vrf_list_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
ipv6 = ET.SubElement(set, "ipv6")
next_vrf = ET.SubElement(ipv6, "next-vrf")
next_vrf_list = ET.SubElement(next_vrf, "next-vrf-list")
next_hop_key = ET.SubElement(next_vrf_list, "next-hop")
next_hop_key.text = kwargs.pop('next_hop')
vrf = ET.SubElement(next_vrf_list, "vrf")
vrf.text = kwargs.pop('vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def set_kind(query_proto, kind):
"""Set the kind constraint for the given datastore.Query proto message."""
del query_proto.kind[:]
query_proto.kind.add().name = kind | Set the kind constraint for the given datastore.Query proto message. | Below is the the instruction that describes the task:
### Input:
Set the kind constraint for the given datastore.Query proto message.
### Response:
def set_kind(query_proto, kind):
"""Set the kind constraint for the given datastore.Query proto message."""
del query_proto.kind[:]
query_proto.kind.add().name = kind |
def setWindowSize(self, winsz):
"""Sets the size of scroll window"""
self.tracePlot.setWindowSize(winsz)
self.stimPlot.setWindowSize(winsz) | Sets the size of scroll window | Below is the the instruction that describes the task:
### Input:
Sets the size of scroll window
### Response:
def setWindowSize(self, winsz):
"""Sets the size of scroll window"""
self.tracePlot.setWindowSize(winsz)
self.stimPlot.setWindowSize(winsz) |
def sort_top_targets(self, top, orders):
'''
Returns the sorted high data from the merged top files
'''
sorted_top = collections.defaultdict(OrderedDict)
# pylint: disable=cell-var-from-loop
for saltenv, targets in six.iteritems(top):
sorted_targets = sorted(targets,
key=lambda target: orders[saltenv][target])
for target in sorted_targets:
sorted_top[saltenv][target] = targets[target]
# pylint: enable=cell-var-from-loop
return sorted_top | Returns the sorted high data from the merged top files | Below is the the instruction that describes the task:
### Input:
Returns the sorted high data from the merged top files
### Response:
def sort_top_targets(self, top, orders):
'''
Returns the sorted high data from the merged top files
'''
sorted_top = collections.defaultdict(OrderedDict)
# pylint: disable=cell-var-from-loop
for saltenv, targets in six.iteritems(top):
sorted_targets = sorted(targets,
key=lambda target: orders[saltenv][target])
for target in sorted_targets:
sorted_top[saltenv][target] = targets[target]
# pylint: enable=cell-var-from-loop
return sorted_top |
def V(a,b,C):
"""
Simple interface to the nuclear attraction function.
>>> from pyquante2 import pgbf,cgbf
>>> s = pgbf(1)
>>> isclose(V(s,s,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,sc,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,s,(0,0,0)),-1.595769)
True
>>> isclose(V(s,sc,(0,0,0)),-1.595769)
True
"""
if b.contracted:
return sum(cb*V(pb,a,C) for (cb,pb) in b)
elif a.contracted:
return sum(ca*V(b,pa,C) for (ca,pa) in a)
return a.norm*b.norm*nuclear_attraction(a.exponent,a.powers,a.origin,
b.exponent,b.powers,b.origin,C) | Simple interface to the nuclear attraction function.
>>> from pyquante2 import pgbf,cgbf
>>> s = pgbf(1)
>>> isclose(V(s,s,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,sc,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,s,(0,0,0)),-1.595769)
True
>>> isclose(V(s,sc,(0,0,0)),-1.595769)
True | Below is the the instruction that describes the task:
### Input:
Simple interface to the nuclear attraction function.
>>> from pyquante2 import pgbf,cgbf
>>> s = pgbf(1)
>>> isclose(V(s,s,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,sc,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,s,(0,0,0)),-1.595769)
True
>>> isclose(V(s,sc,(0,0,0)),-1.595769)
True
### Response:
def V(a,b,C):
"""
Simple interface to the nuclear attraction function.
>>> from pyquante2 import pgbf,cgbf
>>> s = pgbf(1)
>>> isclose(V(s,s,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,sc,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,s,(0,0,0)),-1.595769)
True
>>> isclose(V(s,sc,(0,0,0)),-1.595769)
True
"""
if b.contracted:
return sum(cb*V(pb,a,C) for (cb,pb) in b)
elif a.contracted:
return sum(ca*V(b,pa,C) for (ca,pa) in a)
return a.norm*b.norm*nuclear_attraction(a.exponent,a.powers,a.origin,
b.exponent,b.powers,b.origin,C) |
def gallery_section(images, title):
"""Create detail section with gallery.
Args:
title (str): Title to be displayed for detail section.
images: stream of marv image files
Returns
One detail section.
"""
# pull all images
imgs = []
while True:
img = yield marv.pull(images)
if img is None:
break
imgs.append({'src': img.relpath})
if not imgs:
return
# create gallery widget and section containing it
widget = {'title': images.title, 'gallery': {'images': imgs}}
section = {'title': title, 'widgets': [widget]}
yield marv.push(section) | Create detail section with gallery.
Args:
title (str): Title to be displayed for detail section.
images: stream of marv image files
Returns
One detail section. | Below is the the instruction that describes the task:
### Input:
Create detail section with gallery.
Args:
title (str): Title to be displayed for detail section.
images: stream of marv image files
Returns
One detail section.
### Response:
def gallery_section(images, title):
    """Create a detail section holding a gallery widget.
    Args:
        title (str): Title to be displayed for detail section.
        images: stream of marv image files
    Returns
        One detail section.
    """
    # Drain the image stream into a list of gallery entries.
    entries = []
    img = yield marv.pull(images)
    while img is not None:
        entries.append({'src': img.relpath})
        img = yield marv.pull(images)
    # No images at all -> emit no section.
    if not entries:
        return
    # Wrap the gallery widget in a detail section and push it downstream.
    gallery_widget = {'title': images.title, 'gallery': {'images': entries}}
    yield marv.push({'title': title, 'widgets': [gallery_widget]})
def match_pagination(ref_line):
"""Remove footer pagination from references lines"""
pattern = ur'\(?\[?(\d{1,4})\]?\)?\.?\s*$'
re_footer = re.compile(pattern, re.UNICODE)
match = re_footer.match(ref_line)
if match:
return int(match.group(1))
return None | Remove footer pagination from references lines | Below is the the instruction that describes the task:
### Input:
Remove footer pagination from references lines
### Response:
def match_pagination(ref_line):
    """Detect footer pagination in a reference line.

    Args:
        ref_line: candidate reference line (text).

    Returns:
        The page number as an ``int`` when the line consists solely of a
        pagination marker such as ``(123)``, ``[45].`` or ``678``;
        otherwise ``None``.
    """
    # NOTE: the original used a ``ur''`` literal, which is a SyntaxError on
    # Python 3; a plain raw string yields the byte-identical pattern (it
    # contains no recognized escape sequences) and works on both versions.
    pattern = r'\(?\[?(\d{1,4})\]?\)?\.?\s*$'
    match = re.compile(pattern, re.UNICODE).match(ref_line)
    return int(match.group(1)) if match else None
def from_sample(sample):
"""Upload results of processing from an analysis pipeline sample.
"""
upload_config = sample.get("upload")
if upload_config:
approach = _approaches[upload_config.get("method", "filesystem")]
for finfo in _get_files(sample):
approach.update_file(finfo, sample, upload_config)
return [[sample]] | Upload results of processing from an analysis pipeline sample. | Below is the the instruction that describes the task:
### Input:
Upload results of processing from an analysis pipeline sample.
### Response:
def from_sample(sample):
    """Upload results of processing from an analysis pipeline sample.
    """
    upload_config = sample.get("upload")
    # No upload configuration -> nothing to push anywhere.
    if not upload_config:
        return [[sample]]
    # "filesystem" is the default transport when none is configured.
    method = upload_config.get("method", "filesystem")
    approach = _approaches[method]
    for finfo in _get_files(sample):
        approach.update_file(finfo, sample, upload_config)
    return [[sample]]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.