| code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) |
|---|---|
def __return_json(url):
"""
Returns the JSON data obtained by querying the API service
Called by
- meaning()
- synonym()
:param url: the complete, formatted URL which is queried using requests
:returns: JSON content returned by the API, or False if the response status is not 200
"""
with try_URL():
response = requests.get(url)
if response.status_code == 200:
return response.json()
else:
return False
|
Returns the JSON data obtained by querying the API service
Called by
- meaning()
- synonym()
:param url: the complete, formatted URL which is queried using requests
:returns: JSON content returned by the API, or False if the response status is not 200
|
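A minimal, self-contained sketch of the pattern __return_json implements above, for illustration only; it omits the module's try_URL context manager, and the URL shown is a hypothetical placeholder.
import requests

def fetch_json(url):
    # Return the parsed JSON body on HTTP 200, False otherwise,
    # mirroring the behaviour of __return_json above.
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        return response.json()
    return False

# Hypothetical usage:
# data = fetch_json("https://api.example.com/define?word=python")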
def _get_warped_array(
input_file=None,
indexes=None,
dst_bounds=None,
dst_shape=None,
dst_crs=None,
resampling=None,
src_nodata=None,
dst_nodata=None
):
"""Extract a numpy array from a raster file."""
try:
return _rasterio_read(
input_file=input_file,
indexes=indexes,
dst_bounds=dst_bounds,
dst_shape=dst_shape,
dst_crs=dst_crs,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata
)
except Exception as e:
logger.exception("error while reading file %s: %s", input_file, e)
raise
|
Extract a numpy array from a raster file.
|
def from_xmldict(cls, xml_dict):
"""Create an `Author` from datacite3 metadata converted by
`xmltodict`.
Parameters
----------
xml_dict : :class:`collections.OrderedDict`
A `dict`-like object mapping XML content for a single record (i.e.,
the contents of the ``record`` tag in OAI-PMH XML). This dict is
typically generated from :mod:`xmltodict`.
"""
name = xml_dict['creatorName']
kwargs = {}
if 'affiliation' in xml_dict:
kwargs['affiliation'] = xml_dict['affiliation']
return cls(name, **kwargs)
|
Create an `Author` from datacite3 metadata converted by
`xmltodict`.
Parameters
----------
xml_dict : :class:`collections.OrderedDict`
A `dict`-like object mapping XML content for a single record (i.e.,
the contents of the ``record`` tag in OAI-PMH XML). This dict is
typically generated from :mod:`xmltodict`.
|
def estimate_size_in_bytes(cls, key, value, headers):
""" Get the upper-bound estimate on the size of a record
"""
return (
cls.HEADER_STRUCT.size + cls.MAX_RECORD_OVERHEAD +
cls.size_of(key, value, headers)
)
|
Get the upper-bound estimate on the size of a record
|
def load(self, context):
"""Returns the plugin, if possible.
Args:
context: The TBContext flags.
Returns:
A BeholderPlugin instance or None if it couldn't be loaded.
"""
try:
# pylint: disable=g-import-not-at-top,unused-import
import tensorflow
except ImportError:
return
# pylint: disable=g-import-not-at-top
from tensorboard.plugins.beholder.beholder_plugin import BeholderPlugin
return BeholderPlugin(context)
|
Returns the plugin, if possible.
Args:
context: The TBContext flags.
Returns:
A BeholderPlugin instance or None if it couldn't be loaded.
|
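The optional-import pattern used by load() above, shown in isolation as a rough sketch; the return value and function name are stand-ins for illustration, not the TensorBoard API.
def load_optional_plugin(context=None):
    # Probe for the heavy dependency first; returning None tells the caller
    # to skip this plugin rather than fail at import time.
    try:
        import tensorflow  # noqa: F401  (only checking availability)
    except ImportError:
        return None
    # In the real loader the concrete plugin class is imported and constructed
    # here; a placeholder value stands in for it in this sketch.
    return ("plugin would be constructed with", context)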
def _http_get_json(self, url):
"""
Make an HTTP GET request to the specified URL, check that it returned a
JSON response, and return the data parsed from that response.
Parameters
----------
url
The URL to GET.
Returns
-------
Dictionary of data parsed from a JSON HTTP response.
Exceptions
----------
* PythonKCMeetupsBadJson
* PythonKCMeetupsBadResponse
* PythonKCMeetupsMeetupDown
* PythonKCMeetupsNotJson
* PythonKCMeetupsRateLimitExceeded
"""
response = self._http_get(url)
content_type = response.headers['content-type']
parsed_mimetype = mimeparse.parse_mime_type(content_type)
if parsed_mimetype[1] not in ('json', 'javascript'):
raise PythonKCMeetupsNotJson(content_type)
try:
return json.loads(response.content)
except ValueError as e:
raise PythonKCMeetupsBadJson(e)
|
Make an HTTP GET request to the specified URL, check that it returned a
JSON response, and return the data parsed from that response.
Parameters
----------
url
The URL to GET.
Returns
-------
Dictionary of data parsed from a JSON HTTP response.
Exceptions
----------
* PythonKCMeetupsBadJson
* PythonKCMeetupsBadResponse
* PythonKCMeetupsMeetupDown
* PythonKCMeetupsNotJson
* PythonKCMeetupsRateLimitExceeded
|
def list_(runas=None):
'''
List all rvm-installed rubies
runas
The user under which to run rvm. If not specified, then rvm will be run
as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rvm.list
'''
rubies = []
output = _rvm(['list'], runas=runas)
if output:
regex = re.compile(r'^[= ]([*> ]) ([^- ]+)-([^ ]+) \[ (.*) \]')
for line in output.splitlines():
match = regex.match(line)
if match:
rubies.append([
match.group(2), match.group(3), match.group(1) == '*'
])
return rubies
|
List all rvm-installed rubies
runas
The user under which to run rvm. If not specified, then rvm will be run
as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rvm.list
|
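An illustrative check of the regex used by list_() above against a hypothetical line of `rvm list` output (not captured from a real host):
import re

regex = re.compile(r'^[= ]([*> ]) ([^- ]+)-([^ ]+) \[ (.*) \]')
line = "=* ruby-2.6.3 [ x86_64 ]"
match = regex.match(line)
if match:
    # [interpreter, version, is_default] as built by list_()
    print([match.group(2), match.group(3), match.group(1) == '*'])
    # -> ['ruby', '2.6.3', True]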
def hello(self):
"""
Test pulp server connections defined in ~/.config/juicer/config.
"""
for env in self.args.environment:
juicer.utils.Log.log_info("Trying to open a connection to %s, %s ...",
env, self.connectors[env].base_url)
try:
_r = self.connectors[env].get()
juicer.utils.Log.log_info("OK")
except JuicerError:
juicer.utils.Log.log_info("FAILED")
continue
juicer.utils.Log.log_info("Attempting to authenticate as %s",
self.connectors[env].auth[0])
_r = self.connectors[env].get('/repositories/')
if _r.status_code == Constants.PULP_GET_OK:
juicer.utils.Log.log_info("OK")
else:
juicer.utils.Log.log_info("FAILED")
juicer.utils.Log.log_info("Server said: %s", _r.content)
continue
return True
|
Test pulp server connections defined in ~/.config/juicer/config.
|
def delete_mutating_webhook_configuration(self, name, **kwargs):
"""
delete a MutatingWebhookConfiguration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_mutating_webhook_configuration_with_http_info(name, **kwargs)
else:
(data) = self.delete_mutating_webhook_configuration_with_http_info(name, **kwargs)
return data
|
delete a MutatingWebhookConfiguration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
|
def add_instruction (self, instr):
"""
Adds the given instruction to this basic block's list of instructions.
Also updates the variable lists (used_variables, defined_variables)
"""
assert(isinstance(instr, Instruction))
self.instruction_list.append(instr)
if instr.lhs not in self.defined_variables:
if isinstance(instr.lhs, Variable):
self.defined_variables.append(instr.lhs)
if isinstance(instr, EqInstruction):
if isinstance(instr.rhs, Variable):
if instr.rhs not in self.used_variables:
self.used_variables.append(instr.rhs)
else:
if isinstance(instr.rhs_1, Variable):
if instr.rhs_1 not in self.used_variables:
self.used_variables.append(instr.rhs_1)
if isinstance(instr.rhs_2, Variable):
if instr.rhs_2 not in self.used_variables:
self.used_variables.append(instr.rhs_2)
|
Adds the given instruction to this basic block's list of instructions.
Also updates the variable lists (used_variables, defined_variables)
|
def varvalu(self, varn=None):
'''
$foo
$foo.bar
$foo.bar()
$foo[0]
$foo.bar(10)
'''
self.ignore(whitespace)
if varn is None:
varn = self.varname()
varv = s_ast.VarValue(kids=[varn])
# handle derefs and calls...
while self.more():
if self.nextstr('.'):
varv = self.varderef(varv)
continue
if self.nextstr('('):
varv = self.varcall(varv)
continue
#if self.nextstr('['):
#varv = self.varslice(varv)
break
return varv
|
$foo
$foo.bar
$foo.bar()
$foo[0]
$foo.bar(10)
|
def processRequest(cls, ps, **kw):
"""Invokes a callback that should return a (request, response) tuple
representing the SOAP request and response, respectively.
ps -- ParsedSoap instance representing HTTP Body.
request -- twisted.web.server.Request
"""
resource = kw['resource']
method = resource.getOperation(ps, None) # This getOperation method is valid for ServiceSOAPBinding subclass
rsp = method(ps, **kw)[1] # return (request, response) but we only need response
return rsp
|
Invokes a callback that should return a (request, response) tuple
representing the SOAP request and response, respectively.
ps -- ParsedSoap instance representing HTTP Body.
request -- twisted.web.server.Request
|
def remove(self,
package,
shutit_pexpect_child=None,
options=None,
echo=None,
timeout=shutit_global.shutit_global_object.default_timeout,
note=None):
"""Distro-independent remove function.
Takes a package name and runs relevant remove function.
@param package: Package to remove, which is run through package_map.
@param shutit_pexpect_child: See send()
@param options: Dict of options to pass to the remove command,
mapped by install_type.
@param timeout: See send(). Default: 3600
@param note: See send()
@return: True if all ok (i.e. the package was successfully removed),
False otherwise.
@rtype: boolean
"""
shutit_global.shutit_global_object.yield_to_draw()
# If separated by spaces, remove separately
if package.find(' ') != -1:
for p in package.split(' '):
self.remove(p,shutit_pexpect_child=shutit_pexpect_child,options=options,timeout=timeout,note=note)
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.remove(package,
echo=echo,
options=options,
timeout=timeout,
note=note)
|
Distro-independent remove function.
Takes a package name and runs relevant remove function.
@param package: Package to remove, which is run through package_map.
@param shutit_pexpect_child: See send()
@param options: Dict of options to pass to the remove command,
mapped by install_type.
@param timeout: See send(). Default: 3600
@param note: See send()
@return: True if all ok (i.e. the package was successfully removed),
False otherwise.
@rtype: boolean
|
def get_json_files(files, recursive=False):
"""Return a list of files to validate from `files`. If a member of `files`
is a directory, its children with a ``.json`` extension will be added to
the return value.
Args:
files: A list of file paths and/or directory paths.
recursive: If ``true``, this will descend into any subdirectories
of input directories.
Returns:
A list of file paths to validate.
"""
json_files = []
if not files:
return json_files
for fn in files:
if os.path.isdir(fn):
children = list_json_files(fn, recursive)
json_files.extend(children)
elif is_json(fn):
json_files.append(fn)
else:
continue
if not json_files:
raise NoJSONFileFoundError("No JSON files found!")
return json_files
|
Return a list of files to validate from `files`. If a member of `files`
is a directory, its children with a ``.json`` extension will be added to
the return value.
Args:
files: A list of file paths and/or directory paths.
recursive: If ``true``, this will descend into any subdirectories
of input directories.
Returns:
A list of file paths to validate.
|
def persist(filename):
"""
Append the digital elevation map, projected using lat/lon, as a variable of
the netCDF file.
Keyword arguments:
filename -- the name of a netcdf file.
"""
dem_projected = obtain_to(filename)
with nc.loader(filename) as root:
data = nc.getvar(root, 'data')
dem = nc.getvar(root, 'dem', 'f4', source=data)
stack = [dim for dim in dem.shape if dim not in dem_projected.shape]
stack = stack[0] if stack else 1
dem[:] = np.vstack(map(lambda x: [dem_projected], range(stack)))
|
Append the digital elevation map, projected using lat/lon, as a variable of
the netCDF file.
Keyword arguments:
filename -- the name of a netcdf file.
|
def _recover_public_key(G, order, r, s, i, e):
"""Recover a public key from a signature.
See SEC 1: Elliptic Curve Cryptography, section 4.1.6, "Public
Key Recovery Operation".
http://www.secg.org/sec1-v2.pdf
"""
c = G.curve()
# 1.1 Let x = r + jn
x = r + (i // 2) * order
# 1.3 point from x
alpha = (x * x * x + c.a() * x + c.b()) % c.p()
beta = pycoin.ecdsa.numbertheory.modular_sqrt(alpha, c.p())
y = beta if (beta - i) % 2 == 0 else c.p() - beta
# 1.4 Check that nR is at infinity
R = pycoin.ecdsa.ellipticcurve.Point(c, x, y, order)
rInv = pycoin.ecdsa.numbertheory.inverse_mod(r, order) # r^-1
eNeg = -e % order # -e
# 1.6 compute Q = r^-1 (sR - eG)
Q = rInv * (s * R + eNeg * G)
return Q
|
Recover a public key from a signature.
See SEC 1: Elliptic Curve Cryptography, section 4.1.6, "Public
Key Recovery Operation".
http://www.secg.org/sec1-v2.pdf
|
def update_endpoint(self, endpoint_name, endpoint_config_name):
""" Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request.
Raise an error if an endpoint with ``endpoint_name`` does not exist.
Args:
endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.
endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.
Returns:
str: Name of the Amazon SageMaker ``Endpoint`` being updated.
"""
if not _deployment_entity_exists(lambda: self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)):
raise ValueError('Endpoint with name "{}" does not exist; please use an existing endpoint name'
.format(endpoint_name))
self.sagemaker_client.update_endpoint(EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name)
return endpoint_name
|
Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request.
Raise an error if an endpoint with ``endpoint_name`` does not exist.
Args:
endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.
endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.
Returns:
str: Name of the Amazon SageMaker ``Endpoint`` being updated.
|
def program_global_reg(self):
"""
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
"""
self._clear_strobes()
gr_size = len(self['GLOBAL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:gr_size] = self['GLOBAL_REG'][:] # this will be shifted out
self['SEQ']['GLOBAL_SHIFT_EN'][0:gr_size] = bitarray(gr_size * '1') # this is to enable clock
self['SEQ']['GLOBAL_CTR_LD'][gr_size + 1:gr_size + 2] = bitarray("1") # load signals
self['SEQ']['GLOBAL_DAC_LD'][gr_size + 1:gr_size + 2] = bitarray("1")
# Execute the program (write bits to output pins)
# + 1 extra 0 bit so that everything ends on LOW instead of HIGH
self._run_seq(gr_size + 3)
|
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
|
def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):
"""
Finds bonded atoms and returns an adjacency matrix of bonded atoms.
Author: "Gowoon Cheon"
Email: "gcheon@stanford.edu"
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms
are considered bonded if (distance between atoms 1 and 2) < (radius of
atom 1) + (radius of atom 2) + (tolerance). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values
from JMol are used as default
Returns:
(np.ndarray): A numpy array of shape (number of atoms, number of atoms);
If any image of atom j is bonded to atom i with periodic boundary
conditions, the matrix element [atom i, atom j] is 1.
"""
n_atoms = len(struct.species)
fc = np.array(struct.frac_coords)
fc_copy = np.repeat(fc[:, :, np.newaxis], 27, axis=2)
neighbors = np.array(list(itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]))).T
neighbors = np.repeat(neighbors[np.newaxis, :, :], 1, axis=0)
fc_diff = fc_copy - neighbors
species = list(map(str, struct.species))
# in case of charged species
for i, item in enumerate(species):
if item not in ldict:
species[i] = str(Specie.from_string(item).element)
latmat = struct.lattice.matrix
connected_matrix = np.zeros((n_atoms,n_atoms))
for i in range(n_atoms):
for j in range(i + 1, n_atoms):
max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance
frac_diff = fc_diff[j] - fc_copy[i]
distance_ij = np.dot(latmat.T, frac_diff)
# print(np.linalg.norm(distance_ij,axis=0))
if sum(np.linalg.norm(distance_ij, axis=0) < max_bond_length) > 0:
connected_matrix[i, j] = 1
connected_matrix[j, i] = 1
return connected_matrix
|
Finds bonded atoms and returns an adjacency matrix of bonded atoms.
Author: "Gowoon Cheon"
Email: "gcheon@stanford.edu"
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms
are considered bonded if (distance between atoms 1 and 2) < (radius of
atom 1) + (radius of atom 2) + (tolerance). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values
from JMol are used as default
Returns:
(np.ndarray): A numpy array of shape (number of atoms, number of atoms);
If any image of atom j is bonded to atom i with periodic boundary
conditions, the matrix element [atom i, atom j] is 1.
|
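A small aside on the periodic-image handling in find_connected_atoms above: the itertools.product call enumerates the 27 lattice translations (including the identity) against which every interatomic distance is checked. A standalone sketch:
import itertools
import numpy as np

# Every combination of 0, +1, -1 along the three lattice directions;
# the (0, 0, 0) entry is the original cell itself.
neighbors = np.array(list(itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]))).T
print(neighbors.shape)  # -> (3, 27)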
def move(self, dst, **kwargs):
"""Move file to a new destination and update ``uri``."""
_fs, filename = opener.parse(self.uri)
_fs_dst, filename_dst = opener.parse(dst)
movefile(_fs, filename, _fs_dst, filename_dst, **kwargs)
self.uri = dst
|
Move file to a new destination and update ``uri``.
|
def system_monitor_sfp_alert_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
sfp = ET.SubElement(system_monitor, "sfp")
alert = ET.SubElement(sfp, "alert")
state = ET.SubElement(alert, "state")
state.text = kwargs.pop('state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def get_layout(self, object):
"""Get complete layout for given object"""
layout = self.create_layout(object)
if isinstance(layout, Component):
layout = Layout(layout)
if isinstance(layout, list):
layout = Layout(*layout)
for update_layout in self.layout_updates:
update_layout(layout, object)
layout.set_object(object)
return layout
|
Get complete layout for given object
|
def cancel():
"""Returns a threading.Event() that will get set when SIGTERM or
SIGINT is received. This can be used to cancel execution of threads.
"""
cancel = threading.Event()
def cancel_execution(signum, frame):
signame = SIGNAL_NAMES.get(signum, signum)
logger.info("Signal %s received, quitting "
"(this can take some time)...", signame)
cancel.set()
signal.signal(signal.SIGINT, cancel_execution)
signal.signal(signal.SIGTERM, cancel_execution)
return cancel
|
Returns a threading.Event() that will get set when SIGTERM or
SIGINT is received. This can be used to cancel execution of threads.
|
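A self-contained sketch of how the Event returned by cancel() is typically consumed by a worker thread; the names here are illustrative and not part of the original module.
import signal
import threading
import time

cancel_event = threading.Event()

def _handle_signal(signum, frame):
    # Mirrors cancel_execution() above: flag the event so workers can exit.
    cancel_event.set()

signal.signal(signal.SIGINT, _handle_signal)
signal.signal(signal.SIGTERM, _handle_signal)

def worker():
    while not cancel_event.is_set():
        time.sleep(0.5)  # do one unit of work, then re-check the flag

threading.Thread(target=worker).start()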
def redact_image(
self,
parent,
inspect_config=None,
image_redaction_configs=None,
include_findings=None,
byte_item=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Redacts potentially sensitive info from an image.
This method has limits on input size, processing time, and output size.
See https://cloud.google.com/dlp/docs/redacting-sensitive-data-images to
learn more.
When no InfoTypes or CustomInfoTypes are specified in this request, the
system will automatically choose what detectors to run. By default this may
be all types, but may change over time as detectors are updated.
Example:
>>> from google.cloud import dlp_v2
>>>
>>> client = dlp_v2.DlpServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.redact_image(parent)
Args:
parent (str): The parent resource name, for example projects/my-project-id.
inspect_config (Union[dict, ~google.cloud.dlp_v2.types.InspectConfig]): Configuration for the inspector.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.InspectConfig`
image_redaction_configs (list[Union[dict, ~google.cloud.dlp_v2.types.ImageRedactionConfig]]): The configuration for specifying what content to redact from images.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.ImageRedactionConfig`
include_findings (bool): Whether the response should include findings along with the redacted
image.
byte_item (Union[dict, ~google.cloud.dlp_v2.types.ByteContentItem]): The content must be PNG, JPEG, SVG or BMP.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.ByteContentItem`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dlp_v2.types.RedactImageResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "redact_image" not in self._inner_api_calls:
self._inner_api_calls[
"redact_image"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.redact_image,
default_retry=self._method_configs["RedactImage"].retry,
default_timeout=self._method_configs["RedactImage"].timeout,
client_info=self._client_info,
)
request = dlp_pb2.RedactImageRequest(
parent=parent,
inspect_config=inspect_config,
image_redaction_configs=image_redaction_configs,
include_findings=include_findings,
byte_item=byte_item,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["redact_image"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Redacts potentially sensitive info from an image.
This method has limits on input size, processing time, and output size.
See https://cloud.google.com/dlp/docs/redacting-sensitive-data-images to
learn more.
When no InfoTypes or CustomInfoTypes are specified in this request, the
system will automatically choose what detectors to run. By default this may
be all types, but may change over time as detectors are updated.
Example:
>>> from google.cloud import dlp_v2
>>>
>>> client = dlp_v2.DlpServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.redact_image(parent)
Args:
parent (str): The parent resource name, for example projects/my-project-id.
inspect_config (Union[dict, ~google.cloud.dlp_v2.types.InspectConfig]): Configuration for the inspector.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.InspectConfig`
image_redaction_configs (list[Union[dict, ~google.cloud.dlp_v2.types.ImageRedactionConfig]]): The configuration for specifying what content to redact from images.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.ImageRedactionConfig`
include_findings (bool): Whether the response should include findings along with the redacted
image.
byte_item (Union[dict, ~google.cloud.dlp_v2.types.ByteContentItem]): The content must be PNG, JPEG, SVG or BMP.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.ByteContentItem`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dlp_v2.types.RedactImageResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
def search(filter, # pylint: disable=C0103
dn=None, # pylint: disable=C0103
scope=None,
attrs=None,
**kwargs):
'''
Run an arbitrary LDAP query and return the results.
CLI Example:
.. code-block:: bash
salt 'ldaphost' ldap.search "filter=cn=myhost"
Return data:
.. code-block:: python
{'myhost': {'count': 1,
'results': [['cn=myhost,ou=hosts,o=acme,c=gb',
{'saltKeyValue': ['ntpserver=ntp.acme.local',
'foo=myfoo'],
'saltState': ['foo', 'bar']}]],
'time': {'human': '1.2ms', 'raw': '0.00123'}}}
Search and connection options can be overridden by specifying the relevant
option as key=value pairs, for example:
.. code-block:: bash
salt 'ldaphost' ldap.search filter=cn=myhost dn=ou=hosts,o=acme,c=gb
scope=1 attrs='' server='localhost' port='7393' tls=True bindpw='ssh'
'''
if not dn:
dn = _config('dn', 'basedn') # pylint: disable=C0103
if not scope:
scope = _config('scope')
if attrs == '': # Allow command line 'return all' attr override
attrs = None
elif attrs is None:
attrs = _config('attrs')
_ldap = _connect(**kwargs)
start = time.time()
log.debug(
'Running LDAP search with filter:%s, dn:%s, scope:%s, '
'attrs:%s', filter, dn, scope, attrs
)
results = _ldap.search_s(dn, int(scope), filter, attrs)
elapsed = (time.time() - start)
if elapsed < 0.200:
elapsed_h = six.text_type(round(elapsed * 1000, 1)) + 'ms'
else:
elapsed_h = six.text_type(round(elapsed, 2)) + 's'
ret = {
'results': results,
'count': len(results),
'time': {'human': elapsed_h, 'raw': six.text_type(round(elapsed, 5))},
}
return ret
|
Run an arbitrary LDAP query and return the results.
CLI Example:
.. code-block:: bash
salt 'ldaphost' ldap.search "filter=cn=myhost"
Return data:
.. code-block:: python
{'myhost': {'count': 1,
'results': [['cn=myhost,ou=hosts,o=acme,c=gb',
{'saltKeyValue': ['ntpserver=ntp.acme.local',
'foo=myfoo'],
'saltState': ['foo', 'bar']}]],
'time': {'human': '1.2ms', 'raw': '0.00123'}}}
Search and connection options can be overridden by specifying the relevant
option as key=value pairs, for example:
.. code-block:: bash
salt 'ldaphost' ldap.search filter=cn=myhost dn=ou=hosts,o=acme,c=gb
scope=1 attrs='' server='localhost' port='7393' tls=True bindpw='ssh'
|
def validate_v_rgb(value):
"""Validate a V_RGB value."""
if len(value) != 6:
raise vol.Invalid(
'{} is not six characters long'.format(value))
return validate_hex(value)
|
Validate a V_RGB value.
|
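For illustration, a rough stand-in for the validate_hex helper referenced above (an assumption; the real helper is not shown here), together with the six-character V_RGB check:
import re

def validate_hex_sketch(value):
    # Assumed behaviour: reject anything that is not purely hexadecimal.
    if not re.fullmatch(r'[0-9A-Fa-f]+', value):
        raise ValueError('{} is not valid hex'.format(value))
    return value

def validate_v_rgb_sketch(value):
    if len(value) != 6:
        raise ValueError('{} is not six characters long'.format(value))
    return validate_hex_sketch(value)

print(validate_v_rgb_sketch('FF00AA'))  # -> 'FF00AA'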
def create_locks(context, network_ids, addresses):
"""Creates locks for each IP address that is null-routed.
The function creates the IP address if it is not present in the database.
"""
for address in addresses:
address_model = None
try:
address_model = _find_or_create_address(
context, network_ids, address)
lock_holder = None
if address_model.lock_id:
lock_holder = db_api.lock_holder_find(
context,
lock_id=address_model.lock_id, name=LOCK_NAME,
scope=db_api.ONE)
if not lock_holder:
LOG.info("Creating lock holder on IPAddress %s with id %s",
address_model.address_readable,
address_model.id)
db_api.lock_holder_create(
context, address_model, name=LOCK_NAME, type="ip_address")
except Exception:
LOG.exception("Failed to create lock holder on IPAddress %s",
address_model)
continue
context.session.flush()
|
Creates locks for each IP address that is null-routed.
The function creates the IP address if it is not present in the database.
|
def _index_verify(index_file, **extra_kwargs):
"""Populate the template and compare to documentation index file.
Used for both ``docs/index.rst`` and ``docs/index.rst.release.template``.
Args:
index_file (str): Filename to compare against.
extra_kwargs (Dict[str, str]): Over-ride for template arguments.
One **special** keyword is ``side_effect``, which can be used
to update the template output after the fact.
Raises:
ValueError: If the current ``index.rst`` doesn't agree with the
expected value computed from the template.
"""
side_effect = extra_kwargs.pop("side_effect", None)
with open(TEMPLATE_FILE, "r") as file_obj:
template = file_obj.read()
template_kwargs = {
"code_block1": SPHINX_CODE_BLOCK1,
"code_block2": SPHINX_CODE_BLOCK2,
"code_block3": SPHINX_CODE_BLOCK3,
"testcleanup": TEST_CLEANUP,
"toctree": TOCTREE,
"bernstein_basis": BERNSTEIN_BASIS_SPHINX,
"bezier_defn": BEZIER_DEFN_SPHINX,
"sum_to_unity": SUM_TO_UNITY_SPHINX,
"img_prefix": "",
"extra_links": "",
"docs": "",
"docs_img": "",
"pypi": "\n\n|pypi| ",
"pypi_img": PYPI_IMG,
"versions": "|versions|\n\n",
"versions_img": VERSIONS_IMG,
"rtd_version": RTD_VERSION,
"revision": REVISION,
"circleci_badge": CIRCLECI_BADGE,
"circleci_path": "",
"travis_badge": TRAVIS_BADGE,
"travis_path": "",
"appveyor_badge": APPVEYOR_BADGE,
"appveyor_path": "",
"coveralls_badge": COVERALLS_BADGE,
"coveralls_path": COVERALLS_PATH,
"zenodo": "|zenodo|",
"zenodo_img": ZENODO_IMG,
"joss": " |JOSS|",
"joss_img": JOSS_IMG,
}
template_kwargs.update(**extra_kwargs)
expected = template.format(**template_kwargs)
if side_effect is not None:
expected = side_effect(expected)
with open(index_file, "r") as file_obj:
contents = file_obj.read()
if contents != expected:
err_msg = "\n" + get_diff(
contents,
expected,
index_file + ".actual",
index_file + ".expected",
)
raise ValueError(err_msg)
else:
rel_name = os.path.relpath(index_file, _ROOT_DIR)
msg = "{} contents are as expected.".format(rel_name)
print(msg)
|
Populate the template and compare to documentation index file.
Used for both ``docs/index.rst`` and ``docs/index.rst.release.template``.
Args:
index_file (str): Filename to compare against.
extra_kwargs (Dict[str, str]): Over-ride for template arguments.
One **special** keyword is ``side_effect``, which can be used
to update the template output after the fact.
Raises:
ValueError: If the current ``index.rst`` doesn't agree with the
expected value computed from the template.
|
def unmark_featured(self, request, queryset):
"""
Un-Mark selected featured posts.
"""
queryset.update(featured=False)
self.message_user(
request, _('Selected entries are no longer marked as featured.'))
|
Un-Mark selected featured posts.
|
def get_complex_and_node_state(self, hosts, services):
"""Get state, handling AND aggregation ::
* Get the worst state: 2 if any son is CRITICAL, otherwise the max of the sons (3 <=> UNKNOWN < CRITICAL <=> 2)
* Reverse the state if this is a NOT node
:param hosts: host objects
:param services: service objects
:return: 0, 1 or 2
:rtype: int
"""
# First we get the state of all our sons
states = [s.get_state(hosts, services) for s in self.sons]
# Next we calculate the worst state
if 2 in states:
worst_state = 2
else:
worst_state = max(states)
# Then we handle eventual not value
if self.not_value:
return self.get_reverse_state(worst_state)
return worst_state
|
Get state, handling AND aggregation ::
* Get the worst state: 2 if any son is CRITICAL, otherwise the max of the sons (3 <=> UNKNOWN < CRITICAL <=> 2)
* Reverse the state if this is a NOT node
:param hosts: host objects
:param services: service objects
:return: 0, 1 or 2
:rtype: int
|
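A worked illustration of the AND-aggregation rule above, using hypothetical child states (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN):
states = [0, 3, 1]            # no CRITICAL child
worst = 2 if 2 in states else max(states)
print(worst)                  # -> 3 (UNKNOWN wins when nothing is CRITICAL)

states = [0, 3, 2]            # one CRITICAL child
worst = 2 if 2 in states else max(states)
print(worst)                  # -> 2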
def getFaxResultRN(self, CorpNum, RequestNum, UserID=None):
""" Retrieve fax transmission results.
args
CorpNum : Popbill member business registration number
RequestNum : the request number assigned when the transmission was requested
UserID : Popbill member ID
return
fax transmission information as list
raise
PopbillException
"""
if RequestNum is None or RequestNum == '':
raise PopbillException(-99999999, "Request number was not entered.")
return self._httpget('/FAX/Get/' + RequestNum, CorpNum, UserID)
|
Retrieve fax transmission results.
args
CorpNum : Popbill member business registration number
RequestNum : the request number assigned when the transmission was requested
UserID : Popbill member ID
return
fax transmission information as list
raise
PopbillException
|
def addSplits(self, login, tableName, splits):
"""
Parameters:
- login
- tableName
- splits
"""
self.send_addSplits(login, tableName, splits)
self.recv_addSplits()
|
Parameters:
- login
- tableName
- splits
|
def _get_types_from_sample(result_vars, sparql_results_json):
"""Return types if homogeneous within sample
Compare up to 10 rows of results to determine homogeneity.
DESCRIBE and CONSTRUCT queries, for example,
:param result_vars:
:param sparql_results_json:
"""
total_bindings = len(sparql_results_json['results']['bindings'])
homogeneous_types = {}
for result_var in result_vars:
var_types = set()
var_datatypes = set()
for i in range(0, min(total_bindings, 10)):
binding = sparql_results_json['results']['bindings'][i]
rdf_term = binding.get(result_var)
if rdf_term is not None: # skip missing values
var_types.add(rdf_term.get('type'))
var_datatypes.add(rdf_term.get('datatype'))
if len(var_types) > 1 or len(var_datatypes) > 1:
return None # Heterogeneous types
else:
homogeneous_types[result_var] = {
'type': var_types.pop() if var_types else None,
'datatype': var_datatypes.pop() if var_datatypes else None
}
return homogeneous_types
|
Return types if homogeneous within sample
Compare up to 10 rows of results to determine homogeneity.
DESCRIBE and CONSTRUCT queries, for example,
:param result_vars:
:param sparql_results_json:
|
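An illustrative (hypothetical) SPARQL results JSON fragment of the shape _get_types_from_sample expects; for this input the function would report a homogeneous 'literal' type with an xsd:integer datatype for the variable 'age':
sample_results = {
    "results": {
        "bindings": [
            {"age": {"type": "literal",
                     "datatype": "http://www.w3.org/2001/XMLSchema#integer",
                     "value": "42"}},
            {"age": {"type": "literal",
                     "datatype": "http://www.w3.org/2001/XMLSchema#integer",
                     "value": "7"}},
        ]
    }
}
# _get_types_from_sample(["age"], sample_results) would then return:
# {"age": {"type": "literal",
#          "datatype": "http://www.w3.org/2001/XMLSchema#integer"}}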
def to_key(literal_or_identifier):
''' returns string representation of this object'''
if literal_or_identifier['type'] == 'Identifier':
return literal_or_identifier['name']
elif literal_or_identifier['type'] == 'Literal':
k = literal_or_identifier['value']
if isinstance(k, float):
return unicode(float_repr(k))
elif 'regex' in literal_or_identifier:
return compose_regex(k)
elif isinstance(k, bool):
return u'true' if k else u'false'
elif k is None:
return u'null'
else:
return unicode(k)
|
returns string representation of this object
|
def fetch_and_parse(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return the root Element of the response."""
doc = ElementTree.parse(fetch(method, uri, params_prefix, **params))
return _parse(doc.getroot())
|
Fetch the given uri and return the root Element of the response.
|
def _report_disk_stats(self):
"""Report metrics about the volume space usage"""
stats = {
'docker.data.used': None,
'docker.data.total': None,
'docker.data.free': None,
'docker.metadata.used': None,
'docker.metadata.total': None,
'docker.metadata.free': None
# these two are calculated by _calc_percent_disk_stats
# 'docker.data.percent': None,
# 'docker.metadata.percent': None
}
info = self.docker_util.client.info()
driver_status = info.get('DriverStatus', [])
if not driver_status:
self.log.warning('Disk metrics collection is enabled but docker info did not'
' report any. Your storage driver might not support them, skipping.')
return
for metric in driver_status:
# only consider metrics about disk space
if len(metric) == 2 and 'Space' in metric[0]:
# identify Data and Metadata metrics
mtype = 'data'
if 'Metadata' in metric[0]:
mtype = 'metadata'
if 'Used' in metric[0]:
stats['docker.{0}.used'.format(mtype)] = metric[1]
elif 'Space Total' in metric[0]:
stats['docker.{0}.total'.format(mtype)] = metric[1]
elif 'Space Available' in metric[0]:
stats['docker.{0}.free'.format(mtype)] = metric[1]
stats = self._format_disk_metrics(stats)
stats.update(self._calc_percent_disk_stats(stats))
tags = self._get_tags()
for name, val in stats.iteritems():
if val is not None:
self.gauge(name, val, tags)
|
Report metrics about the volume space usage
|
def calculate_path(self, remote_relative_path, input_type):
""" Only for use by the Pulsar client; managers should override this to
enforce security and make the directory if needed.
"""
directory, allow_nested_files = self._directory_for_file_type(input_type)
return self.path_helper.remote_join(directory, remote_relative_path)
|
Only for use by the Pulsar client; managers should override this to
enforce security and make the directory if needed.
|
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
|
Remember a single drawable tuple to paint later.
|
def check_index(self, key, *, index):
"""Fails the transaction if Key does not have a modify index equal to
Index
Parameters:
key (str): Key to check
index (ObjectIndex): Index ID
"""
self.append({
"Verb": "check-index",
"Key": key,
"Index": extract_attr(index, keys=["ModifyIndex", "Index"])
})
return self
|
Fails the transaction if Key does not have a modify index equal to
Index
Parameters:
key (str): Key to check
index (ObjectIndex): Index ID
|
def _raise_unrecoverable_error_client(self, exception):
"""
Raises an exceptions.ClientError with a message telling that the error probably comes from the client
configuration.
:param exception: Exception that caused the ClientError
:type exception: Exception
:raise exceptions.ClientError
"""
message = ('There was an unrecoverable error during the HTTP request which is probably related to your '
'configuration. Please verify `' + self.DEPENDENCY + '` library configuration and update it. If the '
'issue persists, do not hesitate to contact us with the following information: `' + repr(exception) +
'`.')
raise exceptions.ClientError(message, client_exception=exception)
|
Raises an exceptions.ClientError with a message telling that the error probably comes from the client
configuration.
:param exception: Exception that caused the ClientError
:type exception: Exception
:raise exceptions.ClientError
|
def get_arp_output_arp_entry_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address = ET.SubElement(arp_entry, "ip-address")
ip_address.text = kwargs.pop('ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def _spelling_pipeline(self, sources, options, personal_dict):
"""Check spelling pipeline."""
for source in self._pipeline_step(sources, options, personal_dict):
# Don't waste time on empty strings
if source._has_error():
yield Results([], source.context, source.category, source.error)
elif not source.text or source.text.isspace():
continue
else:
encoding = source.encoding
if source._is_bytes():
text = source.text
else:
# UTF-16 and UTF-32 don't work well with Aspell and Hunspell,
# so encode with the compatible UTF-8 instead.
if encoding.startswith(('utf-16', 'utf-32')):
encoding = 'utf-8'
text = source.text.encode(encoding)
self.log('', 3)
self.log(text, 3)
cmd = self.setup_command(encoding, options, personal_dict)
self.log("Command: " + str(cmd), 4)
try:
wordlist = util.call_spellchecker(cmd, input_text=text, encoding=encoding)
yield Results(
[w for w in sorted(set(wordlist.replace('\r', '').split('\n'))) if w],
source.context,
source.category
)
except Exception as e: # pragma: no cover
err = self.get_error(e)
yield Results([], source.context, source.category, err)
|
Check spelling pipeline.
|
def generate_sky_catalog(image, refwcs, **kwargs):
"""Build source catalog from input image using photutils.
This script borrows heavily from build_source_catalog.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- selected while ignoring any input pixel which has been flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image.
refwcs : `~stwcs.wcsutils.HSTWCS`
Definition of the reference frame WCS.
dqname : str
EXTNAME for the DQ array, if present, in the input image.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the S/N threshold used for identifying sources in
the image relative to the background RMS in much the same way that
the 'threshold' parameter in 'tweakreg' works.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
--------
master_cat : `~astropy.table.Table`
Source catalog for all 'valid' sources identified from all chips of the
input image with positions translated to the reference WCS coordinate
frame.
"""
# Extract source catalogs for each chip
source_cats = generate_source_catalog(image, **kwargs)
# Build source catalog for entire image
master_cat = None
numSci = countExtn(image, extname='SCI')
# if no refwcs specified, build one now...
if refwcs is None:
refwcs = build_reference_wcs([image])
for chip in range(numSci):
chip += 1
# work with sources identified from this specific chip
seg_tab_phot = source_cats[chip]
if seg_tab_phot is None:
continue
# Convert pixel coordinates from this chip to sky coordinates
chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip))
seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'], seg_tab_phot['ycentroid'], 1)
# Convert sky positions to pixel positions in the reference WCS frame
seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1)
seg_tab_phot['xcentroid'] = seg_xy_out[0]
seg_tab_phot['ycentroid'] = seg_xy_out[1]
if master_cat is None:
master_cat = seg_tab_phot
else:
master_cat = vstack([master_cat, seg_tab_phot])
return master_cat
|
Build source catalog from input image using photutils.
This script borrows heavily from build_source_catalog.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- selected while ignoring any input pixel which has been flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image.
refwcs : `~stwcs.wcsutils.HSTWCS`
Definition of the reference frame WCS.
dqname : str
EXTNAME for the DQ array, if present, in the input image.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the S/N threshold used for identifying sources in
the image relative to the background RMS in much the same way that
the 'threshold' parameter in 'tweakreg' works.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
--------
master_cat : `~astropy.table.Table`
Source catalog for all 'valid' sources identified from all chips of the
input image with positions translated to the reference WCS coordinate
frame.
|
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
|
Convert json into its string representation.
Used for writing outputs to markdown.
|
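A quick demonstration of the json.dumps settings used by string2json above, shown without the IPython BytesEncoder and cast_unicode helpers (assumed to be available in the original module):
import json

kwargs = {
    'indent': 1,
    'sort_keys': True,
    'separators': (',', ': '),
}
print(json.dumps({'b': 1, 'a': [1, 2]}, **kwargs))
# {
#  "a": [
#   1,
#   2
#  ],
#  "b": 1
# }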
def RIBSystemRouteLimitExceeded_originator_switch_info_switchIpV6Address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
RIBSystemRouteLimitExceeded = ET.SubElement(config, "RIBSystemRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(RIBSystemRouteLimitExceeded, "originator-switch-info")
switchIpV6Address = ET.SubElement(originator_switch_info, "switchIpV6Address")
switchIpV6Address.text = kwargs.pop('switchIpV6Address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def _load_cell(args, cell_body):
"""Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bq load <optional args>
Args:
args: the arguments following '%bq load'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
A message about whether the load succeeded or failed.
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get('parameters') or []
if parameters:
jsonschema.validate({'parameters': parameters}, BigQuerySchema.QUERY_PARAMS_SCHEMA)
name = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
table = _get_table(name)
if not table:
table = bigquery.Table(name)
if args['mode'] == 'create':
if table.exists():
raise Exception('table %s already exists; use "append" or "overwrite" as mode.' % name)
if not cell_body or 'schema' not in cell_body:
raise Exception('Table does not exist, and no schema specified in cell; cannot load.')
schema = config['schema']
# schema can be an instance of bigquery.Schema.
# For example, user can run "my_schema = bigquery.Schema.from_data(df)" in a previous cell and
# specify "schema: $my_schema" in cell input.
if not isinstance(schema, bigquery.Schema):
jsonschema.validate({'schema': schema}, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(schema)
table.create(schema=schema)
elif not table.exists():
raise Exception('table %s does not exist; use "create" as mode.' % name)
csv_options = bigquery.CSVOptions(delimiter=args['delimiter'], skip_leading_rows=args['skip'],
allow_jagged_rows=not args['strict'], quote=args['quote'])
path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
job = table.load(path, mode=args['mode'], source_format=args['format'], csv_options=csv_options,
ignore_unknown_values=not args['strict'])
if job.failed:
raise Exception('Load failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Load completed with errors: %s' % str(job.errors))
|
Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bq load <optional args>
Args:
args: the arguments following '%bq load'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
A message about whether the load succeeded or failed.
|
def _parseAttrs(self, attrsStr):
"""
Parse the attributes and values
"""
attributes = dict()
for attrStr in self.SPLIT_ATTR_COL_RE.split(attrsStr):
name, vals = self._parseAttrVal(attrStr)
if name in attributes:
raise GFF3Exception(
"duplicated attribute name: {}".format(name),
self.fileName, self.lineNumber)
attributes[name] = vals
return attributes
|
Parse the attributes and values
|
def currentView(cls, parent=None):
"""
Returns the current view for the given class within a viewWidget. If
no view widget is supplied, then a blank view is returned.
:param viewWidget | <projexui.widgets.xviewwidget.XViewWidget> || None
:return <XView> || None
"""
if parent is None:
parent = projexui.topWindow()
for inst in parent.findChildren(cls):
if inst.isCurrent():
return inst
return None
|
Returns the current view for the given class within a viewWidget. If
no view widget is supplied, then a blank view is returned.
:param viewWidget | <projexui.widgets.xviewwidget.XViewWidget> || None
:return <XView> || None
|
def retrieve_activity_profile(self, activity, profile_id):
"""Retrieve activity profile with the specified parameters
:param activity: Activity object of the desired activity profile
:type activity: :class:`tincan.activity.Activity`
:param profile_id: UUID of the desired profile
:type profile_id: str | unicode
:return: LRS Response object with an activity profile doc as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
if not isinstance(activity, Activity):
activity = Activity(activity)
request = HTTPRequest(
method="GET",
resource="activities/profile",
ignore404=True
)
request.query_params = {
"profileId": profile_id,
"activityId": activity.id
}
lrs_response = self._send_request(request)
if lrs_response.success:
doc = ActivityProfileDocument(
id=profile_id,
content=lrs_response.data,
activity=activity
)
headers = lrs_response.response.getheaders()
if "lastModified" in headers and headers["lastModified"] is not None:
doc.timestamp = headers["lastModified"]
if "contentType" in headers and headers["contentType"] is not None:
doc.content_type = headers["contentType"]
if "etag" in headers and headers["etag"] is not None:
doc.etag = headers["etag"]
lrs_response.content = doc
return lrs_response
|
Retrieve activity profile with the specified parameters
:param activity: Activity object of the desired activity profile
:type activity: :class:`tincan.activity.Activity`
:param profile_id: UUID of the desired profile
:type profile_id: str | unicode
:return: LRS Response object with an activity profile doc as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
|
def parse_setup(raw_frames, destination_frame=None, header=0, separator=None, column_names=None,
column_types=None, na_strings=None, skipped_columns=None, custom_non_data_line_markers=None):
"""
Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
returned dictionary from this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file. If skipped_columns are specified, only list column names
of columns that are not skipped.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. If skipped_columns are specified, only list column types of columns that are not skipped.
The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.
:param custom_non_data_line_markers: If a line in imported file starts with any character in given string it will NOT be imported. Empty string means all lines are imported, None means that default behaviour for given format will be used
:returns: a dictionary containing parse parameters guessed by the H2O backend.
"""
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(raw_frames, str, [str])
assert_is_type(destination_frame, None, str)
assert_is_type(header, -1, 0, 1)
assert_is_type(separator, None, I(str, lambda s: len(s) == 1))
assert_is_type(column_names, [str], None)
assert_is_type(column_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
check_frame_id(destination_frame)
# The H2O backend only accepts things that are quoted
if is_type(raw_frames, str): raw_frames = [raw_frames]
# temporary dictionary just to pass the following information to the parser: header, separator
kwargs = {"check_header": header, "source_frames": [quoted(frame_id) for frame_id in raw_frames]}
if separator:
kwargs["separator"] = ord(separator)
if custom_non_data_line_markers is not None:
kwargs["custom_non_data_line_markers"] = custom_non_data_line_markers;
j = api("POST /3/ParseSetup", data=kwargs)
if "warnings" in j and j["warnings"]:
for w in j["warnings"]:
warnings.warn(w)
# TODO: really should be url encoding...
if destination_frame:
j["destination_frame"] = destination_frame
parse_column_len = len(j["column_types"]) if skipped_columns is None else (len(j["column_types"])-len(skipped_columns))
tempColumnNames = j["column_names"] if j["column_names"] is not None else gen_header(j["number_columns"])
useType = [True]*len(tempColumnNames)
if skipped_columns is not None:
useType = [True]*len(tempColumnNames)
for ind in range(len(tempColumnNames)):
if ind in skipped_columns:
useType[ind]=False
if column_names is not None:
if not isinstance(column_names, list): raise ValueError("col_names should be a list")
if (skipped_columns is not None) and len(skipped_columns)>0:
if (len(column_names)) != parse_column_len:
raise ValueError(
"length of col_names should be equal to the number of columns parsed: %d vs %d"
% (len(column_names), parse_column_len))
else:
if len(column_names) != len(j["column_types"]): raise ValueError(
"length of col_names should be equal to the number of columns: %d vs %d"
% (len(column_names), len(j["column_types"])))
j["column_names"] = column_names
counter = 0
for ind in range(len(tempColumnNames)):
if useType[ind]:
tempColumnNames[ind]=column_names[counter]
counter=counter+1
if (column_types is not None): # keep the column types to include all columns
if isinstance(column_types, dict):
# overwrite dictionary to ordered list of column types. if user didn't specify column type for all names,
# use type provided by backend
if j["column_names"] is None: # no colnames discovered! (C1, C2, ...)
j["column_names"] = gen_header(j["number_columns"])
if not set(column_types.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in col_types is not a subset of the column names")
idx = 0
column_types_list = []
for name in tempColumnNames: # column_names may have already been changed
if name in column_types:
column_types_list.append(column_types[name])
else:
column_types_list.append(j["column_types"][idx])
idx += 1
column_types = column_types_list
elif isinstance(column_types, list):
if len(column_types) != parse_column_len: raise ValueError(
"length of col_types should be equal to the number of parsed columns")
# need to expand it out to all columns, not just the parsed ones
column_types_list = j["column_types"]
counter = 0
for ind in range(len(j["column_types"])):
                if useType[ind] and (column_types[counter] is not None):
column_types_list[ind]=column_types[counter]
counter=counter+1
column_types = column_types_list
else: # not dictionary or list
raise ValueError("col_types should be a list of types or a dictionary of column names to types")
j["column_types"] = column_types
if na_strings is not None:
if isinstance(na_strings, dict):
# overwrite dictionary to ordered list of lists of na_strings
if not j["column_names"]: raise ValueError("column names should be specified")
if not set(na_strings.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in na_strings is not a subset of the column names")
j["na_strings"] = [[] for _ in range(len(j["column_names"]))]
for name, na in na_strings.items():
idx = j["column_names"].index(name)
if is_type(na, str): na = [na]
for n in na: j["na_strings"][idx].append(quoted(n))
elif is_type(na_strings, [[str]]):
if len(na_strings) != len(j["column_types"]):
raise ValueError("length of na_strings should be equal to the number of columns")
j["na_strings"] = [[quoted(na) for na in col] if col is not None else [] for col in na_strings]
elif isinstance(na_strings, list):
j["na_strings"] = [[quoted(na) for na in na_strings]] * len(j["column_types"])
else: # not a dictionary or list
raise ValueError(
"na_strings should be a list, a list of lists (one list per column), or a dictionary of column "
"names to strings which are to be interpreted as missing values")
if skipped_columns is not None:
if isinstance(skipped_columns, list):
j["skipped_columns"] = []
for colidx in skipped_columns:
if (colidx < 0): raise ValueError("skipped column index cannot be negative")
j["skipped_columns"].append(colidx)
# quote column names and column types also when not specified by user
if j["column_names"]: j["column_names"] = list(map(quoted, j["column_names"]))
j["column_types"] = list(map(quoted, j["column_types"]))
return j
|
Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
dictionary returned from this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file. If skipped_columns are specified, only list column names
of columns that are not skipped.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. If skipped_columns are specified, only list column types of columns that are not skipped.
The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: a list of integer column indices identifying the columns to skip; skipped columns are not parsed into the final frame from the import file.
:param custom_non_data_line_markers: if a line in the imported file starts with any character in the given string, it will NOT be imported. An empty string means all lines are imported; None means the default behaviour for the given format is used.
:returns: a dictionary containing parse parameters guessed by the H2O backend.
|
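A minimal usage sketch of the intended workflow (the file path and the "species" column are placeholders; assumes a running H2O cluster reachable from the h2o client):
import h2o

h2o.init()
raw = h2o.lazy_import("iris.csv")                              # import without parsing
setup = h2o.parse_setup(raw, column_types={"species": "enum"})
setup["separator"] = ord(",")                                  # correct any guess before parsing
frame = h2o.parse_raw(setup)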
def getTrackedDeviceIndexForControllerRole(self, unDeviceType):
"""Returns the device index associated with a specific role, for example the left hand or the right hand. This function is deprecated in favor of the new IVRInput system."""
fn = self.function_table.getTrackedDeviceIndexForControllerRole
result = fn(unDeviceType)
return result
|
Returns the device index associated with a specific role, for example the left hand or the right hand. This function is deprecated in favor of the new IVRInput system.
|
def start(self):
"""
Start all the processes
"""
Global.LOGGER.info("starting the flow manager")
self._start_actions()
self._start_message_fetcher()
Global.LOGGER.debug("flow manager started")
|
Start all the processes
|
def header_canonical(self, header_name):
"""Translate HTTP headers to Django header names."""
# Translate as stated in the docs:
# https://docs.djangoproject.com/en/1.6/ref/request-response/#django.http.HttpRequest.META
header_name = header_name.lower()
if header_name == 'content-type':
return 'CONTENT-TYPE'
elif header_name == 'content-length':
return 'CONTENT-LENGTH'
return 'HTTP_%s' % header_name.replace('-', '_').upper()
|
Translate HTTP headers to Django header names.
|
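A hedged illustration of the mapping performed above; here `parser` is a placeholder for any object exposing this method:
parser.header_canonical('X-Api-Key')     # -> 'HTTP_X_API_KEY'
parser.header_canonical('User-Agent')    # -> 'HTTP_USER_AGENT'
# 'content-type' and 'content-length' are special-cased by the method above.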
def escape(s, quote=False):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes.
"""
if s is None:
return ''
if not isinstance(s, (str, bytes)):
s = str(s)
if isinstance(s, bytes):
try:
s.decode('ascii')
except UnicodeDecodeError:
s = s.decode('utf-8', 'replace')
s = s.replace('&', '&').replace('<', '<').replace('>', '>')
if quote:
s = s.replace('"', """)
return s
|
Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes.
|
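Assuming the escape() function above is in scope, a quick check of its behaviour:
print(escape('<b>"Fish & Chips"</b>', quote=True))
# &lt;b&gt;&quot;Fish &amp; Chips&quot;&lt;/b&gt;
print(escape(None))   # '' -- None escapes to an empty string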
def read_function(data, window, ij, g_args):
"""Takes an array, and sets any value above the mean to the max, the rest to 0"""
output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
return output
|
Takes an array, and sets any value above the mean to the max, the rest to 0
|
def download_album_by_id(self, album_id, album_name):
"""Download a album by its name.
:params album_id: album id.
:params album_name: album name.
"""
try:
# use old api
songs = self.crawler.get_album_songs(album_id)
except RequestException as exception:
click.echo(exception)
else:
folder = os.path.join(self.folder, album_name)
for song in songs:
self.download_song_by_id(song.song_id, song.song_name, folder)
|
Download an album by its name.
:param album_id: album id.
:param album_name: album name.
|
def granularity_to_time(s):
"""convert a named granularity into seconds.
get value in seconds for named granularities: M1, M5 ... H1 etc.
>>> print(granularity_to_time("M5"))
300
"""
mfact = {
'S': 1,
'M': 60,
'H': 3600,
'D': 86400,
'W': 604800,
}
try:
        f, n = re.match(r"(?P<f>[SMHDW])(?:(?P<n>\d+)|)", s).groups()
n = n if n else 1
return mfact[f] * int(n)
except Exception as e:
raise ValueError(e)
|
convert a named granularity into seconds.
get value in seconds for named granularities: M1, M5 ... H1 etc.
>>> print(granularity_to_time("M5"))
300
|
def add_inverse_query(self, key_val={}):
"""
Add an es_dsl inverse query object to the es_dsl Search object
:param key_val: a key-value pair(dict) containing the query to be added to the search object
:returns: self, which allows the method to be chainable with the other methods
"""
q = Q("match", **key_val)
self.search = self.search.query(~q)
return self
|
Add an es_dsl inverse query object to the es_dsl Search object
:param key_val: a key-value pair(dict) containing the query to be added to the search object
:returns: self, which allows the method to be chainable with the other methods
|
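A minimal sketch of the chainable usage; `searcher` is a placeholder instance of the class that owns this method, built on elasticsearch-dsl:
response = (
    searcher
    .add_inverse_query({"author": "bot"})   # exclude documents whose author matches "bot"
    .search
    .execute()
)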
def route_election(self, election):
"""
Legislative or executive office?
"""
        if (
            election.election_type.slug == ElectionType.GENERAL
            or election.election_type.slug == ElectionType.GENERAL_RUNOFF
        ):
self.bootstrap_general_election(election)
elif election.race.special:
self.bootstrap_special_election(election)
if election.race.office.is_executive:
self.bootstrap_executive_office(election)
else:
self.bootstrap_legislative_office(election)
|
Legislative or executive office?
|
def get_network_attributegroup_items(network_id, **kwargs):
"""
Get all the group items in a network
"""
user_id=kwargs.get('user_id')
net_i = _get_network(network_id)
net_i.check_read_permission(user_id)
group_items_i = db.DBSession.query(AttrGroupItem).filter(
AttrGroupItem.network_id==network_id).all()
return group_items_i
|
Get all the group items in a network
|
def list_names():
""" List all known color names. """
names = get_all_names()
# This is 375 right now. Probably won't ever change, but I'm not sure.
nameslen = len(names)
print('\nListing {} names:\n'.format(nameslen))
# Using 3 columns of names, still alphabetically sorted from the top down.
# Longest name so far: lightgoldenrodyellow (20 chars)
namewidth = 20
# namewidth * columns == 60, colorwidth * columns == 18, final == 78.
swatch = ' ' * 9
third = nameslen // 3
lastthird = third * 2
cols = (
names[0: third],
names[third: lastthird],
names[lastthird:],
)
# Exactly enough spaces to fill in a blank item (+2 for ': ').
# This may not ever be used, unless another 'known name' is added.
blankitem = ' ' * (namewidth + len(swatch) + 2)
for i in range(third):
nameset = []
for colset in cols:
try:
nameset.append(colset[i])
except IndexError:
nameset.append(None)
continue
line = C('').join(
C(': ').join(
C(name.rjust(namewidth)),
C(swatch, back=name),
) if name else blankitem
for name in nameset
)
print(line)
return 0
|
List all known color names.
|
def _set_ospf(self, v, load=False):
"""
Setter method for ospf, mapped from YANG variable /rbridge_id/router/ospf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrf",ospf.ospf, yang_name="ospf", rest_name="ospf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf', extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF)', u'cli-run-template-enter': u' router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'sort-priority': u'70', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OSPFConfigCallPoint', u'cli-mode-name': u'config-router-ospf-vrf-$(vrf)'}}), is_container='list', yang_name="ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF)', u'cli-run-template-enter': u' router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'sort-priority': u'70', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OSPFConfigCallPoint', u'cli-mode-name': u'config-router-ospf-vrf-$(vrf)'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ospf must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("vrf",ospf.ospf, yang_name="ospf", rest_name="ospf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf', extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF)', u'cli-run-template-enter': u' router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'sort-priority': u'70', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OSPFConfigCallPoint', u'cli-mode-name': u'config-router-ospf-vrf-$(vrf)'}}), is_container='list', yang_name="ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF)', u'cli-run-template-enter': u' router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'sort-priority': u'70', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OSPFConfigCallPoint', u'cli-mode-name': u'config-router-ospf-vrf-$(vrf)'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='list', is_config=True)""",
})
self.__ospf = t
if hasattr(self, '_set'):
self._set()
|
Setter method for ospf, mapped from YANG variable /rbridge_id/router/ospf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf() directly.
|
def system_find_users(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /system/findUsers API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindUsers
"""
return DXHTTPRequest('/system/findUsers', input_params, always_retry=always_retry, **kwargs)
|
Invokes the /system/findUsers API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindUsers
|
def from_dict(cls, d, ignore=()):
"""Create an instance from a serialized version of cls
Args:
d(dict): Endpoints of cls to set
ignore(tuple): Keys to ignore
Returns:
Instance of this class
"""
filtered = {}
for k, v in d.items():
if k == "typeid":
assert v == cls.typeid, \
"Dict has typeid %s but %s has typeid %s" % \
(v, cls, cls.typeid)
elif k not in ignore:
filtered[k] = v
try:
inst = cls(**filtered)
except TypeError as e:
raise TypeError("%s raised error: %s" % (cls.typeid, str(e)))
return inst
|
Create an instance from a serialized version of cls
Args:
d(dict): Endpoints of cls to set
ignore(tuple): Keys to ignore
Returns:
Instance of this class
|
def try_checkpoint_metadata(self, trial):
"""Checkpoints metadata.
Args:
trial (Trial): Trial to checkpoint.
"""
if trial._checkpoint.storage == Checkpoint.MEMORY:
logger.debug("Not saving data for trial w/ memory checkpoint.")
return
try:
logger.debug("Saving trial metadata.")
self._cached_trial_state[trial.trial_id] = trial.__getstate__()
except Exception:
logger.exception("Error checkpointing trial metadata.")
|
Checkpoints metadata.
Args:
trial (Trial): Trial to checkpoint.
|
def parse_url(self) -> RequestUrl:
"""
        Get the parsed URL object.
"""
if self._URL is None:
current_url = b"%s://%s%s" % (
encode_str(self.schema),
encode_str(self.host),
self._current_url
)
self._URL = RequestUrl(current_url)
return cast(RequestUrl, self._URL)
|
Get the parsed URL object.
|
def _internal_network_removed(self, ri, port, ex_gw_port):
"""Remove an internal router port
Check to see if this is the last port to be removed for
a given network scoped by a VRF (note: there can be
different mappings between VRFs and networks -- 1-to-1,
1-to-n, n-to-1, n-to-n -- depending on the configuration
and workflow used). If it is the last port, set the flag
        indicating that the internal sub-interface for that network
on the ASR should be deleted
"""
itfc_deleted = False
driver = self.driver_manager.get_driver(ri.id)
vrf_name = driver._get_vrf_name(ri)
network_name = ex_gw_port['hosting_info'].get('network_name')
if self._router_ids_by_vrf_and_ext_net.get(
vrf_name, {}).get(network_name) and (
ri.router['id'] in
self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):
# If this is the last port for this neutron router,
# then remove this router from the list
if len(ri.internal_ports) == 1 and port in ri.internal_ports:
self._router_ids_by_vrf_and_ext_net[
vrf_name][network_name].remove(ri.router['id'])
# Check if any other routers in this VRF have this network,
# and if not, set the flag to remove the interface
if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(
network_name):
LOG.debug("++ REMOVING NETWORK %s" % network_name)
itfc_deleted = True
del self._router_ids_by_vrf_and_ext_net[
vrf_name][network_name]
if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):
del self._router_ids_by_vrf_and_ext_net[vrf_name]
driver.internal_network_removed(ri, port,
itfc_deleted=itfc_deleted)
if ri.snat_enabled and ex_gw_port:
driver.disable_internal_network_NAT(ri, port, ex_gw_port,
itfc_deleted=itfc_deleted)
|
Remove an internal router port
Check to see if this is the last port to be removed for
a given network scoped by a VRF (note: there can be
different mappings between VRFs and networks -- 1-to-1,
1-to-n, n-to-1, n-to-n -- depending on the configuration
and workflow used). If it is the last port, set the flag
indicating that the internal sub-interface for that network
on the ASR should be deleted
|
def pvwatts_ac(pdc, pdc0, eta_inv_nom=0.96, eta_inv_ref=0.9637):
r"""
Implements NREL's PVWatts inverter model [1]_.
.. math::
\eta = \frac{\eta_{nom}}{\eta_{ref}} (-0.0162\zeta - \frac{0.0059}{\zeta} + 0.9858)
.. math::
P_{ac} = \min(\eta P_{dc}, P_{ac0})
where :math:`\zeta=P_{dc}/P_{dc0}` and :math:`P_{dc0}=P_{ac0}/\eta_{nom}`.
Parameters
----------
pdc: numeric
DC power.
pdc0: numeric
Nameplate DC rating.
eta_inv_nom: numeric, default 0.96
Nominal inverter efficiency.
eta_inv_ref: numeric, default 0.9637
Reference inverter efficiency. PVWatts defines it to be 0.9637
and is included here for flexibility.
Returns
-------
pac: numeric
AC power.
References
----------
.. [1] A. P. Dobos, "PVWatts Version 5 Manual,"
http://pvwatts.nrel.gov/downloads/pvwattsv5.pdf
(2014).
"""
pac0 = eta_inv_nom * pdc0
zeta = pdc / pdc0
# arrays to help avoid divide by 0 for scalar and array
eta = np.zeros_like(pdc, dtype=float)
pdc_neq_0 = ~np.equal(pdc, 0)
# eta < 0 if zeta < 0.006. pac is forced to be >= 0 below. GH 541
eta = eta_inv_nom / eta_inv_ref * (
- 0.0162*zeta
- np.divide(0.0059, zeta, out=eta, where=pdc_neq_0)
+ 0.9858)
pac = eta * pdc
pac = np.minimum(pac0, pac)
pac = np.maximum(0, pac) # GH 541
return pac
|
r"""
Implements NREL's PVWatts inverter model [1]_.
.. math::
\eta = \frac{\eta_{nom}}{\eta_{ref}} (-0.0162\zeta - \frac{0.0059}{\zeta} + 0.9858)
.. math::
P_{ac} = \min(\eta P_{dc}, P_{ac0})
where :math:`\zeta=P_{dc}/P_{dc0}` and :math:`P_{dc0}=P_{ac0}/\eta_{nom}`.
Parameters
----------
pdc: numeric
DC power.
pdc0: numeric
Nameplate DC rating.
eta_inv_nom: numeric, default 0.96
Nominal inverter efficiency.
eta_inv_ref: numeric, default 0.9637
Reference inverter efficiency. PVWatts defines it to be 0.9637
and is included here for flexibility.
Returns
-------
pac: numeric
AC power.
References
----------
.. [1] A. P. Dobos, "PVWatts Version 5 Manual,"
http://pvwatts.nrel.gov/downloads/pvwattsv5.pdf
(2014).
|
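A worked numeric sketch of the formula above (the input values are illustrative, not taken from the reference):
pdc, pdc0 = 2000.0, 2500.0                 # W
eta_inv_nom, eta_inv_ref = 0.96, 0.9637
zeta = pdc / pdc0                          # 0.8
eta = eta_inv_nom / eta_inv_ref * (-0.0162 * zeta - 0.0059 / zeta + 0.9858)
pac = min(eta * pdc, eta_inv_nom * pdc0)   # pac0 = 2400 W, so no clipping here
print(round(eta, 4), round(pac, 1))        # approx. 0.9618 1923.5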
def find_elb_dns_zone_id(name='', env='dev', region='us-east-1'):
"""Get an application's AWS elb dns zone id.
Args:
name (str): ELB name
env (str): Environment/account of ELB
region (str): AWS Region
Returns:
str: elb DNS zone ID
"""
LOG.info('Find %s ELB DNS Zone ID in %s [%s].', name, env, region)
client = boto3.Session(profile_name=env).client('elb', region_name=region)
elbs = client.describe_load_balancers(LoadBalancerNames=[name])
return elbs['LoadBalancerDescriptions'][0]['CanonicalHostedZoneNameID']
|
Get an application's AWS elb dns zone id.
Args:
name (str): ELB name
env (str): Environment/account of ELB
region (str): AWS Region
Returns:
str: elb DNS zone ID
|
def zeroize():
'''
Resets the device to default factory settings
CLI Example:
.. code-block:: bash
salt 'device_name' junos.zeroize
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.cli('request system zeroize')
ret['message'] = 'Completed zeroize and rebooted'
except Exception as exception:
ret['message'] = 'Could not zeroize due to : "{0}"'.format(exception)
ret['out'] = False
return ret
|
Resets the device to default factory settings
CLI Example:
.. code-block:: bash
salt 'device_name' junos.zeroize
|
def get_private_name(self, f):
""" get private protected name of an attribute
:param str f: name of the private attribute to be accessed.
"""
f = self.__swagger_rename__[f] if f in self.__swagger_rename__.keys() else f
return '_' + self.__class__.__name__ + '__' + f
|
get private protected name of an attribute
:param str f: name of the private attribute to be accessed.
|
def _str(obj):
"""Show nicely the generic object received."""
values = []
for name in obj._attribs:
val = getattr(obj, name)
if isinstance(val, str):
val = repr(val)
val = str(val) if len(str(val)) < 10 else "(...)"
values.append((name, val))
values = ", ".join("{}={}".format(k, v) for k, v in values)
return "{}({})".format(obj.__class__.__name__, values)
|
Show nicely the generic object received.
|
def get_earth_radii(self):
"""Get earth radii from prologue
Returns:
Equatorial radius, polar radius [m]
"""
earth_model = self.prologue['GeometricProcessing']['EarthModel']
a = earth_model['EquatorialRadius'] * 1000
b = (earth_model['NorthPolarRadius'] +
earth_model['SouthPolarRadius']) / 2.0 * 1000
return a, b
|
Get earth radii from prologue
Returns:
Equatorial radius, polar radius [m]
|
def has(self, url, xpath=None):
"""Check if a URL (and xpath) exists in the cache
If DB has not been initialized yet, returns ``False`` for any URL.
Args:
            url (str): URL to look up in the cache.
xpath (str): xpath to search (may be ``None``)
Returns:
bool: ``True`` if URL exists, ``False`` otherwise
"""
if not path.exists(self.db_path):
return False
return self._query(url, xpath).count() > 0
|
Check if a URL (and xpath) exists in the cache
If DB has not been initialized yet, returns ``False`` for any URL.
Args:
url (str): URL to look up in the cache.
xpath (str): xpath to search (may be ``None``)
Returns:
bool: ``True`` if URL exists, ``False`` otherwise
|
def _values_of_same_type(self, val1, val2):
"""Checks if two values agree in type.
The sparse parameter is less restrictive than the parameter. If both values
are sparse matrices they are considered to be of same type
regardless of their size and values they contain.
"""
if self._is_supported_matrix(val1) and self._is_supported_matrix(val2):
return True
else:
return super(SparseParameter, self)._values_of_same_type(val1, val2)
|
Checks if two values agree in type.
The sparse parameter is less restrictive than the parameter. If both values
are sparse matrices they are considered to be of same type
regardless of their size and values they contain.
|
def db_open(cls, impl, working_dir):
"""
Open a connection to our chainstate db
"""
path = config.get_snapshots_filename(impl, working_dir)
return cls.db_connect(path)
|
Open a connection to our chainstate db
|
def tf_loss_per_instance(self, states, internals, actions, terminal, reward,
next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: Dict of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor.
"""
raise NotImplementedError
|
Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: Dict of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor.
|
def track(self, tracking_number):
"Track a UPS package by number. Returns just a delivery date."
resp = self.send_request(tracking_number)
return self.parse_response(resp)
|
Track a UPS package by number. Returns just a delivery date.
|
def add_quasi_dipole_coordinates(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses Apexpy package to add quasi-dipole coordinates to instrument object.
The Quasi-Dipole coordinate system includes both the tilt and offset of the
geomagnetic field to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
This system is preferred over AACGM near the equator for LEO satellites.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'qd_lat'
for magnetic latitude, 'qd_long' for longitude, and 'mlt' for magnetic local time.
"""
import apexpy
ap = apexpy.Apex(date=inst.date)
qd_lat = []; qd_lon = []; mlt = []
for lat, lon, alt, time in zip(inst[glat_label], inst[glong_label], inst[alt_label],
inst.data.index):
# quasi-dipole latitude and longitude from geodetic coords
tlat, tlon = ap.geo2qd(lat, lon, alt)
qd_lat.append(tlat)
qd_lon.append(tlon)
mlt.append(ap.mlon2mlt(tlon, time))
inst['qd_lat'] = qd_lat
inst['qd_long'] = qd_lon
inst['mlt'] = mlt
inst.meta['qd_lat'] = {'units':'degrees','long_name':'Quasi dipole latitude'}
inst.meta['qd_long'] = {'units':'degrees','long_name':'Quasi dipole longitude'}
inst.meta['qd_mlt'] = {'units':'hrs','long_name':'Magnetic local time'}
return
|
Uses Apexpy package to add quasi-dipole coordinates to instrument object.
The Quasi-Dipole coordinate system includes both the tilt and offset of the
geomagnetic field to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
This system is preferred over AACGM near the equator for LEO satellites.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'qd_lat'
for magnetic latitude, 'qd_long' for longitude, and 'mlt' for magnetic local time.
|
def string_to_sign(self):
"""
The AWS SigV4 string being signed.
"""
return (AWS4_HMAC_SHA256 + "\n" +
self.request_timestamp + "\n" +
self.credential_scope + "\n" +
sha256(self.canonical_request.encode("utf-8")).hexdigest())
|
The AWS SigV4 string being signed.
|
def null(alphabet):
'''
    An FSM accepting nothing (not even the empty string). This
    demonstrates that this is possible, and is also extremely useful
in some situations
'''
return fsm(
alphabet = alphabet,
states = {0},
initial = 0,
finals = set(),
map = {
0: dict([(symbol, 0) for symbol in alphabet]),
},
)
|
An FSM accepting nothing (not even the empty string). This
demonstrates that this is possible, and is also extremely useful
in some situations
|
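A hedged usage sketch, assuming the greenery-style fsm class used above (the accepts() call is that library's API):
empty = null({'a', 'b'})
print(empty.accepts('a'))    # False
print(empty.accepts(''))     # False -- it rejects even the empty string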
def _get_number_of_slices(self, slice_type):
"""
Get the number of slices in a certain direction
"""
if slice_type == SliceType.AXIAL:
return self.dimensions[self.axial_orientation.normal_component]
elif slice_type == SliceType.SAGITTAL:
return self.dimensions[self.sagittal_orientation.normal_component]
elif slice_type == SliceType.CORONAL:
return self.dimensions[self.coronal_orientation.normal_component]
|
Get the number of slices in a certain direction
|
def create_pth():
"""
Create the default PTH file
:return:
"""
if prefix == '/usr':
print("Not creating PTH in real prefix: %s" % prefix)
return False
with open(vext_pth, 'w') as f:
f.write(DEFAULT_PTH_CONTENT)
return True
|
Create the default PTH file
:return:
|
def delist(values):
"""Reduce lists of zero or one elements to individual values."""
assert isinstance(values, list)
if not values:
return None
elif len(values) == 1:
return values[0]
return values
|
Reduce lists of zero or one elements to individual values.
|
def bootstrap_vi(version=None, venvargs=None):
'''
Bootstrap virtualenv into current directory
:param str version: Virtualenv version like 13.1.0 or None for latest version
:param list venvargs: argv list for virtualenv.py or None for default
'''
if not version:
version = get_latest_virtualenv_version()
tarball = download_virtualenv(version)
p = subprocess.Popen('tar xzvf {0}'.format(tarball), shell=True)
p.wait()
p = 'virtualenv-{0}'.format(version)
create_virtualenv(p, venvargs)
|
Bootstrap virtualenv into current directory
:param str version: Virtualenv version like 13.1.0 or None for latest version
:param list venvargs: argv list for virtualenv.py or None for default
|
def rgb_color_picker(obj, min_luminance=None, max_luminance=None):
"""Modified version of colour.RGB_color_picker"""
color_value = int.from_bytes(
hashlib.md5(str(obj).encode('utf-8')).digest(),
'little',
) % 0xffffff
color = Color(f'#{color_value:06x}')
if min_luminance and color.get_luminance() < min_luminance:
color.set_luminance(min_luminance)
elif max_luminance and color.get_luminance() > max_luminance:
color.set_luminance(max_luminance)
return color
|
Modified version of colour.RGB_color_picker
|
def pending_items(self) -> Iterable[Tuple[bytes, bytes]]:
"""
A tuple of (key, value) pairs for every key that has been updated.
Like :meth:`pending_keys()`, this does not return any deleted keys.
"""
for key, value in self._changes.items():
if value is not DELETED:
yield key, value
|
A tuple of (key, value) pairs for every key that has been updated.
Like :meth:`pending_keys()`, this does not return any deleted keys.
|
def cmp_ast(node1, node2):
'''
Compare if two nodes are equal.
'''
if type(node1) != type(node2):
return False
if isinstance(node1, (list, tuple)):
if len(node1) != len(node2):
return False
for left, right in zip(node1, node2):
if not cmp_ast(left, right):
return False
elif isinstance(node1, ast.AST):
for field in node1._fields:
left = getattr(node1, field, Undedined)
right = getattr(node2, field, Undedined)
if not cmp_ast(left, right):
return False
else:
return node1 == node2
return True
|
Compare if two nodes are equal.
|
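A short sketch, assuming cmp_ast() above is in scope; only AST fields are compared, so formatting differences do not matter:
import ast

left = ast.parse("x = a + 1")
right = ast.parse("x = a+1")                  # same tree, different whitespace
print(cmp_ast(left, right))                   # True
print(cmp_ast(left, ast.parse("x = a + 2")))  # False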
def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT):
"""Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map
"""
    pids = []
    for line in in_stream:
        pid = line.strip()
        # Skip blank lines and comment lines.
        if not pid or pid == "#" or pid.startswith("# "):
            continue
        pids.append(pid)
    if len(pids) < 2:
        raise ValueError("Insufficient numbers of identifiers provided.")
logging.info("Read {} identifiers".format(len(pids)))
ore = ResourceMap(base_url=base_url)
logging.info("ORE PID = {}".format(pids[0]))
ore.initialize(pids[0])
logging.info("Metadata PID = {}".format(pids[1]))
ore.addMetadataDocument(pids[1])
ore.addDataDocuments(pids[2:], pids[1])
return ore
|
Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map
|
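A hedged sketch of feeding the function a PID stream (the identifiers are placeholders; assumes the d1_common dependencies used above are installed):
import io

pid_stream = io.StringIO(
    "PID_ORE_value\n"
    "sci_meta_pid_value\n"
    "data_pid_1\n"
    "data_pid_2\n"
)
ore = createResourceMapFromStream(pid_stream)
# `ore` is a ResourceMap aggregating the metadata PID and the two data PIDs.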
def tab(tab_name, element_list=None, section_list=None):
"""
Returns a dictionary representing a new tab to display elements.
This can be thought of as a simple container for displaying multiple
types of information.
Args:
tab_name: The title to display
element_list: The list of elements to display. If a single element is
given it will be wrapped in a list.
section_list: A list of sections to display.
Returns:
A dictionary with metadata specifying that it is to be rendered
as a page containing multiple elements and/or tab.
"""
_tab = {
'Type': 'Tab',
'Title': tab_name,
}
if element_list is not None:
if isinstance(element_list, list):
_tab['Elements'] = element_list
else:
_tab['Elements'] = [element_list]
if section_list is not None:
if isinstance(section_list, list):
_tab['Sections'] = section_list
        else:
            _tab['Sections'] = [section_list]
return _tab
|
Returns a dictionary representing a new tab to display elements.
This can be thought of as a simple container for displaying multiple
types of information.
Args:
tab_name: The title to display
element_list: The list of elements to display. If a single element is
given it will be wrapped in a list.
section_list: A list of sections to display.
Returns:
A dictionary with metadata specifying that it is to be rendered
as a page containing multiple elements and/or tab.
|
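A small usage sketch, assuming tab() above is in scope (the element dict is illustrative):
summary = tab('Summary', element_list={'Type': 'Text', 'Data': 'hello'})
print(summary)
# {'Type': 'Tab', 'Title': 'Summary', 'Elements': [{'Type': 'Text', 'Data': 'hello'}]}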
def output_datacenter(gandi, datacenter, output_keys, justify=14):
""" Helper to output datacenter information."""
output_generic(gandi, datacenter, output_keys, justify)
if 'dc_name' in output_keys:
output_line(gandi, 'datacenter', datacenter['name'], justify)
if 'status' in output_keys:
deactivate_at = datacenter.get('deactivate_at')
if deactivate_at:
output_line(gandi, 'closing on',
deactivate_at.strftime('%d/%m/%Y'), justify)
closing = []
iaas_closed_for = datacenter.get('iaas_closed_for')
if iaas_closed_for == 'ALL':
closing.append('vm')
paas_closed_for = datacenter.get('paas_closed_for')
if paas_closed_for == 'ALL':
closing.append('paas')
if closing:
output_line(gandi, 'closed for', ', '.join(closing), justify)
|
Helper to output datacenter information.
|
def delete_wallet(self, wallet_name):
"""Delete a wallet.
        @param wallet_name: the name of the wallet.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
return make_request(
'{}wallet/{}'.format(self.url, wallet_name),
method='DELETE',
timeout=self.timeout,
client=self._client)
|
Delete a wallet.
@param wallet_name: the name of the wallet.
@return a success string from the plans server.
@raise ServerError via make_request.
|
def get_element_by_name(self, el_name, el_idx=0):
"""
Args:
el_name : str
Name of element to get.
el_idx : int
Index of element to use as base in the event that there are multiple sibling
elements with the same name.
Returns:
element : The selected element.
"""
el_list = self.get_element_list_by_name(el_name)
try:
return el_list[el_idx]
except IndexError:
raise SimpleXMLWrapperException(
'Element not found. element_name="{}" requested_idx={} '
'available_elements={}'.format(el_name, el_idx, len(el_list))
)
|
Args:
el_name : str
Name of element to get.
el_idx : int
Index of element to use as base in the event that there are multiple sibling
elements with the same name.
Returns:
element : The selected element.
|
def p_string_literal(self, p):
"""string_literal : STRING"""
p[0] = self.asttypes.String(p[1])
p[0].setpos(p)
|
string_literal : STRING
|
def format_csv(self, delim=',', qu='"'):
"""
Prepares the data in CSV format
"""
res = qu + self.name + qu + delim
if self.data:
for d in self.data:
res += qu + str(d) + qu + delim
return res + '\n'
|
Prepares the data in CSV format
|
def set_config(self, config):
"""Set (replace) the configuration for the session.
Args:
config: Configuration object
"""
with self._conn:
self._conn.execute("DELETE FROM config")
self._conn.execute('INSERT INTO config VALUES(?)',
(serialize_config(config),))
|
Set (replace) the configuration for the session.
Args:
config: Configuration object
|
def joint_sfs(dac1, dac2, n1=None, n2=None):
"""Compute the joint site frequency spectrum between two populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
Array where the (i, j)th element is the number of variant sites with i
derived alleles in the first population and j derived alleles in the
second population.
"""
# check inputs
dac1, n1 = _check_dac_n(dac1, n1)
dac2, n2 = _check_dac_n(dac2, n2)
# compute site frequency spectrum
x = n1 + 1
y = n2 + 1
# need platform integer for bincount
tmp = (dac1 * y + dac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)
return s
|
Compute the joint site frequency spectrum between two populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
Array where the (i, j)th element is the number of variant sites with i
derived alleles in the first population and j derived alleles in the
second population.
|
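A hedged usage sketch with tiny inputs (assumes this is the scikit-allel implementation, importable as allel.joint_sfs):
import numpy as np
import allel

dac1 = np.array([0, 1, 2, 1])    # derived allele counts in population 1
dac2 = np.array([1, 0, 2, 2])    # derived allele counts in population 2
s = allel.joint_sfs(dac1, dac2)
# s[i, j] counts variants with i derived alleles in pop 1 and j in pop 2;
# here s.shape == (3, 3), with s[0, 1] == s[1, 0] == s[1, 2] == s[2, 2] == 1.
print(s)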
def store_disorder(self, sc=None, force_rerun=False):
"""Wrapper for _store_disorder"""
log.info('Loading sequences to reference GEM-PRO...')
from random import shuffle
g_ids = [g.id for g in self.reference_gempro.functional_genes]
shuffle(g_ids)
def _store_disorder_sc(g_id, outdir=self.sequences_by_gene_dir,
g_to_pickle=self.gene_protein_pickles, force_rerun=force_rerun):
"""Load orthologous strain sequences to reference Protein object, save as new pickle"""
import ssbio.utils
import ssbio.io
import os.path as op
protein_seqs_pickle_path = op.join(outdir, '{}_protein_withseqs_dis.pckl'.format(g_id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_pickle_path):
protein_pickle_path = g_to_pickle[g_id]
protein_pickle = ssbio.io.load_pickle(protein_pickle_path)
protein_pickle.get_all_disorder_predictions(representative_only=False)
protein_pickle.save_pickle(outfile=protein_seqs_pickle_path)
return g_id, protein_seqs_pickle_path
if sc:
genes_rdd = sc.parallelize(g_ids)
result = genes_rdd.map(_store_disorder_sc).collect()
else:
result = []
for g in tqdm(g_ids):
                result.append(_store_disorder_sc(g))
log.info('Storing paths to new Protein objects in self.gene_protein_pickles...')
for g_id, protein_pickle in result:
self.gene_protein_pickles[g_id] = protein_pickle
|
Wrapper for _store_disorder
|
def MultimodeCombine(pupils):
"""
Return the instantaneous coherent fluxes and photometric fluxes for a
multiway multimode combiner (no spatial filtering)
"""
fluxes=[np.vdot(pupils[i],pupils[i]).real for i in range(len(pupils))]
coherentFluxes=[np.vdot(pupils[i],pupils[j])
for i in range(1,len(pupils))
for j in range(i)]
return fluxes,coherentFluxes
|
Return the instantaneous coherent fluxes and photometric fluxes for a
multiway multimode combiner (no spatial filtering)
|
def parse_val(cfg,section,option):
"""extract a single value from .cfg"""
vals = parse_vals(cfg,section,option)
if len(vals)==0:
return ''
else:
assert len(vals)==1, (section, option, vals, type(vals))
return vals[0]
|
extract a single value from .cfg
|