code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def file_print(self, filename):
"""Prints a file on the device to console"""
log.info('Printing ' + filename)
res = self.__exchange(PRINT_FILE.format(filename=filename))
log.info(res)
return res | Prints a file on the device to console | Below is the the instruction that describes the task:
### Input:
Prints a file on the device to console
### Response:
def file_print(self, filename):
"""Prints a file on the device to console"""
log.info('Printing ' + filename)
res = self.__exchange(PRINT_FILE.format(filename=filename))
log.info(res)
return res |
def root_manifest_id(self, root_manifest_id):
"""
Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str
"""
if root_manifest_id is not None and len(root_manifest_id) > 32:
raise ValueError("Invalid value for `root_manifest_id`, length must be less than or equal to `32`")
self._root_manifest_id = root_manifest_id | Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str
### Response:
def root_manifest_id(self, root_manifest_id):
"""
Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str
"""
if root_manifest_id is not None and len(root_manifest_id) > 32:
raise ValueError("Invalid value for `root_manifest_id`, length must be less than or equal to `32`")
self._root_manifest_id = root_manifest_id |
def _extra_trust_root_validation(self):
"""
Manually invoked windows certificate chain builder and verification
step when there are extra trust roots to include in the search process
"""
store = None
cert_chain_context_pointer = None
try:
# We set up an in-memory store to pass as an extra store to grab
# certificates from when performing the verification
store = crypt32.CertOpenStore(
Crypt32Const.CERT_STORE_PROV_MEMORY,
Crypt32Const.X509_ASN_ENCODING,
null(),
0,
null()
)
if is_null(store):
handle_crypt32_error(0)
cert_hashes = set()
for cert in self._session._extra_trust_roots:
cert_data = cert.dump()
result = crypt32.CertAddEncodedCertificateToStore(
store,
Crypt32Const.X509_ASN_ENCODING,
cert_data,
len(cert_data),
Crypt32Const.CERT_STORE_ADD_USE_EXISTING,
null()
)
if not result:
handle_crypt32_error(0)
cert_hashes.add(cert.sha256)
cert_context_pointer_pointer = new(crypt32, 'PCERT_CONTEXT *')
result = secur32.QueryContextAttributesW(
self._context_handle_pointer,
Secur32Const.SECPKG_ATTR_REMOTE_CERT_CONTEXT,
cert_context_pointer_pointer
)
handle_error(result)
cert_context_pointer = unwrap(cert_context_pointer_pointer)
cert_context_pointer = cast(crypt32, 'PCERT_CONTEXT', cert_context_pointer)
# We have to do a funky shuffle here because FILETIME from kernel32
# is different than FILETIME from crypt32 when using cffi. If we
# overwrite the "now_pointer" variable, cffi releases the backing
# memory and we end up getting a validation error about certificate
# expiration time.
orig_now_pointer = new(kernel32, 'FILETIME *')
kernel32.GetSystemTimeAsFileTime(orig_now_pointer)
now_pointer = cast(crypt32, 'FILETIME *', orig_now_pointer)
usage_identifiers = new(crypt32, 'char *[3]')
usage_identifiers[0] = cast(crypt32, 'char *', Crypt32Const.PKIX_KP_SERVER_AUTH)
usage_identifiers[1] = cast(crypt32, 'char *', Crypt32Const.SERVER_GATED_CRYPTO)
usage_identifiers[2] = cast(crypt32, 'char *', Crypt32Const.SGC_NETSCAPE)
cert_enhkey_usage_pointer = struct(crypt32, 'CERT_ENHKEY_USAGE')
cert_enhkey_usage = unwrap(cert_enhkey_usage_pointer)
cert_enhkey_usage.cUsageIdentifier = 3
cert_enhkey_usage.rgpszUsageIdentifier = cast(crypt32, 'char **', usage_identifiers)
cert_usage_match_pointer = struct(crypt32, 'CERT_USAGE_MATCH')
cert_usage_match = unwrap(cert_usage_match_pointer)
cert_usage_match.dwType = Crypt32Const.USAGE_MATCH_TYPE_OR
cert_usage_match.Usage = cert_enhkey_usage
cert_chain_para_pointer = struct(crypt32, 'CERT_CHAIN_PARA')
cert_chain_para = unwrap(cert_chain_para_pointer)
cert_chain_para.RequestedUsage = cert_usage_match
cert_chain_para_size = sizeof(crypt32, cert_chain_para)
cert_chain_para.cbSize = cert_chain_para_size
cert_chain_context_pointer_pointer = new(crypt32, 'PCERT_CHAIN_CONTEXT *')
result = crypt32.CertGetCertificateChain(
null(),
cert_context_pointer,
now_pointer,
store,
cert_chain_para_pointer,
Crypt32Const.CERT_CHAIN_CACHE_END_CERT | Crypt32Const.CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY,
null(),
cert_chain_context_pointer_pointer
)
handle_crypt32_error(result)
cert_chain_policy_para_flags = Crypt32Const.CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS
cert_chain_context_pointer = unwrap(cert_chain_context_pointer_pointer)
# Unwrap the chain and if the final element in the chain is one of
# extra trust roots, set flags so that we trust the certificate even
# though it is not in the Trusted Roots store
cert_chain_context = unwrap(cert_chain_context_pointer)
num_chains = native(int, cert_chain_context.cChain)
if num_chains == 1:
first_simple_chain_pointer = unwrap(cert_chain_context.rgpChain)
first_simple_chain = unwrap(first_simple_chain_pointer)
num_elements = native(int, first_simple_chain.cElement)
last_element_pointer = first_simple_chain.rgpElement[num_elements - 1]
last_element = unwrap(last_element_pointer)
last_element_cert = unwrap(last_element.pCertContext)
last_element_cert_data = bytes_from_buffer(
last_element_cert.pbCertEncoded,
native(int, last_element_cert.cbCertEncoded)
)
last_cert = x509.Certificate.load(last_element_cert_data)
if last_cert.sha256 in cert_hashes:
cert_chain_policy_para_flags |= Crypt32Const.CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG
ssl_extra_cert_chain_policy_para_pointer = struct(crypt32, 'SSL_EXTRA_CERT_CHAIN_POLICY_PARA')
ssl_extra_cert_chain_policy_para = unwrap(ssl_extra_cert_chain_policy_para_pointer)
ssl_extra_cert_chain_policy_para.cbSize = sizeof(crypt32, ssl_extra_cert_chain_policy_para)
ssl_extra_cert_chain_policy_para.dwAuthType = Crypt32Const.AUTHTYPE_SERVER
ssl_extra_cert_chain_policy_para.fdwChecks = 0
ssl_extra_cert_chain_policy_para.pwszServerName = cast(
crypt32,
'wchar_t *',
buffer_from_unicode(self._hostname)
)
cert_chain_policy_para_pointer = struct(crypt32, 'CERT_CHAIN_POLICY_PARA')
cert_chain_policy_para = unwrap(cert_chain_policy_para_pointer)
cert_chain_policy_para.cbSize = sizeof(crypt32, cert_chain_policy_para)
cert_chain_policy_para.dwFlags = cert_chain_policy_para_flags
cert_chain_policy_para.pvExtraPolicyPara = cast(crypt32, 'void *', ssl_extra_cert_chain_policy_para_pointer)
cert_chain_policy_status_pointer = struct(crypt32, 'CERT_CHAIN_POLICY_STATUS')
cert_chain_policy_status = unwrap(cert_chain_policy_status_pointer)
cert_chain_policy_status.cbSize = sizeof(crypt32, cert_chain_policy_status)
result = crypt32.CertVerifyCertificateChainPolicy(
Crypt32Const.CERT_CHAIN_POLICY_SSL,
cert_chain_context_pointer,
cert_chain_policy_para_pointer,
cert_chain_policy_status_pointer
)
handle_crypt32_error(result)
cert_context = unwrap(cert_context_pointer)
cert_data = bytes_from_buffer(cert_context.pbCertEncoded, native(int, cert_context.cbCertEncoded))
cert = x509.Certificate.load(cert_data)
error = cert_chain_policy_status.dwError
if error:
if error == Crypt32Const.CERT_E_EXPIRED:
raise_expired_not_yet_valid(cert)
if error == Crypt32Const.CERT_E_UNTRUSTEDROOT:
oscrypto_cert = load_certificate(cert)
if oscrypto_cert.self_signed:
raise_self_signed(cert)
else:
raise_no_issuer(cert)
if error == Crypt32Const.CERT_E_CN_NO_MATCH:
raise_hostname(cert, self._hostname)
if error == Crypt32Const.TRUST_E_CERT_SIGNATURE:
raise_weak_signature(cert)
if error == Crypt32Const.CRYPT_E_REVOKED:
raise_revoked(cert)
raise_verification(cert)
if cert.hash_algo in set(['md5', 'md2']):
raise_weak_signature(cert)
finally:
if store:
crypt32.CertCloseStore(store, 0)
if cert_chain_context_pointer:
crypt32.CertFreeCertificateChain(cert_chain_context_pointer) | Manually invoked windows certificate chain builder and verification
step when there are extra trust roots to include in the search process | Below is the the instruction that describes the task:
### Input:
Manually invoked windows certificate chain builder and verification
step when there are extra trust roots to include in the search process
### Response:
def _extra_trust_root_validation(self):
"""
Manually invoked windows certificate chain builder and verification
step when there are extra trust roots to include in the search process
"""
store = None
cert_chain_context_pointer = None
try:
# We set up an in-memory store to pass as an extra store to grab
# certificates from when performing the verification
store = crypt32.CertOpenStore(
Crypt32Const.CERT_STORE_PROV_MEMORY,
Crypt32Const.X509_ASN_ENCODING,
null(),
0,
null()
)
if is_null(store):
handle_crypt32_error(0)
cert_hashes = set()
for cert in self._session._extra_trust_roots:
cert_data = cert.dump()
result = crypt32.CertAddEncodedCertificateToStore(
store,
Crypt32Const.X509_ASN_ENCODING,
cert_data,
len(cert_data),
Crypt32Const.CERT_STORE_ADD_USE_EXISTING,
null()
)
if not result:
handle_crypt32_error(0)
cert_hashes.add(cert.sha256)
cert_context_pointer_pointer = new(crypt32, 'PCERT_CONTEXT *')
result = secur32.QueryContextAttributesW(
self._context_handle_pointer,
Secur32Const.SECPKG_ATTR_REMOTE_CERT_CONTEXT,
cert_context_pointer_pointer
)
handle_error(result)
cert_context_pointer = unwrap(cert_context_pointer_pointer)
cert_context_pointer = cast(crypt32, 'PCERT_CONTEXT', cert_context_pointer)
# We have to do a funky shuffle here because FILETIME from kernel32
# is different than FILETIME from crypt32 when using cffi. If we
# overwrite the "now_pointer" variable, cffi releases the backing
# memory and we end up getting a validation error about certificate
# expiration time.
orig_now_pointer = new(kernel32, 'FILETIME *')
kernel32.GetSystemTimeAsFileTime(orig_now_pointer)
now_pointer = cast(crypt32, 'FILETIME *', orig_now_pointer)
usage_identifiers = new(crypt32, 'char *[3]')
usage_identifiers[0] = cast(crypt32, 'char *', Crypt32Const.PKIX_KP_SERVER_AUTH)
usage_identifiers[1] = cast(crypt32, 'char *', Crypt32Const.SERVER_GATED_CRYPTO)
usage_identifiers[2] = cast(crypt32, 'char *', Crypt32Const.SGC_NETSCAPE)
cert_enhkey_usage_pointer = struct(crypt32, 'CERT_ENHKEY_USAGE')
cert_enhkey_usage = unwrap(cert_enhkey_usage_pointer)
cert_enhkey_usage.cUsageIdentifier = 3
cert_enhkey_usage.rgpszUsageIdentifier = cast(crypt32, 'char **', usage_identifiers)
cert_usage_match_pointer = struct(crypt32, 'CERT_USAGE_MATCH')
cert_usage_match = unwrap(cert_usage_match_pointer)
cert_usage_match.dwType = Crypt32Const.USAGE_MATCH_TYPE_OR
cert_usage_match.Usage = cert_enhkey_usage
cert_chain_para_pointer = struct(crypt32, 'CERT_CHAIN_PARA')
cert_chain_para = unwrap(cert_chain_para_pointer)
cert_chain_para.RequestedUsage = cert_usage_match
cert_chain_para_size = sizeof(crypt32, cert_chain_para)
cert_chain_para.cbSize = cert_chain_para_size
cert_chain_context_pointer_pointer = new(crypt32, 'PCERT_CHAIN_CONTEXT *')
result = crypt32.CertGetCertificateChain(
null(),
cert_context_pointer,
now_pointer,
store,
cert_chain_para_pointer,
Crypt32Const.CERT_CHAIN_CACHE_END_CERT | Crypt32Const.CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY,
null(),
cert_chain_context_pointer_pointer
)
handle_crypt32_error(result)
cert_chain_policy_para_flags = Crypt32Const.CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS
cert_chain_context_pointer = unwrap(cert_chain_context_pointer_pointer)
# Unwrap the chain and if the final element in the chain is one of
# extra trust roots, set flags so that we trust the certificate even
# though it is not in the Trusted Roots store
cert_chain_context = unwrap(cert_chain_context_pointer)
num_chains = native(int, cert_chain_context.cChain)
if num_chains == 1:
first_simple_chain_pointer = unwrap(cert_chain_context.rgpChain)
first_simple_chain = unwrap(first_simple_chain_pointer)
num_elements = native(int, first_simple_chain.cElement)
last_element_pointer = first_simple_chain.rgpElement[num_elements - 1]
last_element = unwrap(last_element_pointer)
last_element_cert = unwrap(last_element.pCertContext)
last_element_cert_data = bytes_from_buffer(
last_element_cert.pbCertEncoded,
native(int, last_element_cert.cbCertEncoded)
)
last_cert = x509.Certificate.load(last_element_cert_data)
if last_cert.sha256 in cert_hashes:
cert_chain_policy_para_flags |= Crypt32Const.CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG
ssl_extra_cert_chain_policy_para_pointer = struct(crypt32, 'SSL_EXTRA_CERT_CHAIN_POLICY_PARA')
ssl_extra_cert_chain_policy_para = unwrap(ssl_extra_cert_chain_policy_para_pointer)
ssl_extra_cert_chain_policy_para.cbSize = sizeof(crypt32, ssl_extra_cert_chain_policy_para)
ssl_extra_cert_chain_policy_para.dwAuthType = Crypt32Const.AUTHTYPE_SERVER
ssl_extra_cert_chain_policy_para.fdwChecks = 0
ssl_extra_cert_chain_policy_para.pwszServerName = cast(
crypt32,
'wchar_t *',
buffer_from_unicode(self._hostname)
)
cert_chain_policy_para_pointer = struct(crypt32, 'CERT_CHAIN_POLICY_PARA')
cert_chain_policy_para = unwrap(cert_chain_policy_para_pointer)
cert_chain_policy_para.cbSize = sizeof(crypt32, cert_chain_policy_para)
cert_chain_policy_para.dwFlags = cert_chain_policy_para_flags
cert_chain_policy_para.pvExtraPolicyPara = cast(crypt32, 'void *', ssl_extra_cert_chain_policy_para_pointer)
cert_chain_policy_status_pointer = struct(crypt32, 'CERT_CHAIN_POLICY_STATUS')
cert_chain_policy_status = unwrap(cert_chain_policy_status_pointer)
cert_chain_policy_status.cbSize = sizeof(crypt32, cert_chain_policy_status)
result = crypt32.CertVerifyCertificateChainPolicy(
Crypt32Const.CERT_CHAIN_POLICY_SSL,
cert_chain_context_pointer,
cert_chain_policy_para_pointer,
cert_chain_policy_status_pointer
)
handle_crypt32_error(result)
cert_context = unwrap(cert_context_pointer)
cert_data = bytes_from_buffer(cert_context.pbCertEncoded, native(int, cert_context.cbCertEncoded))
cert = x509.Certificate.load(cert_data)
error = cert_chain_policy_status.dwError
if error:
if error == Crypt32Const.CERT_E_EXPIRED:
raise_expired_not_yet_valid(cert)
if error == Crypt32Const.CERT_E_UNTRUSTEDROOT:
oscrypto_cert = load_certificate(cert)
if oscrypto_cert.self_signed:
raise_self_signed(cert)
else:
raise_no_issuer(cert)
if error == Crypt32Const.CERT_E_CN_NO_MATCH:
raise_hostname(cert, self._hostname)
if error == Crypt32Const.TRUST_E_CERT_SIGNATURE:
raise_weak_signature(cert)
if error == Crypt32Const.CRYPT_E_REVOKED:
raise_revoked(cert)
raise_verification(cert)
if cert.hash_algo in set(['md5', 'md2']):
raise_weak_signature(cert)
finally:
if store:
crypt32.CertCloseStore(store, 0)
if cert_chain_context_pointer:
crypt32.CertFreeCertificateChain(cert_chain_context_pointer) |
def _initialize_monitoring_services_queue(self, chain_state: ChainState):
"""Send the monitoring requests for all current balance proofs.
Note:
The node must always send the *received* balance proof to the
monitoring service, *before* sending its own locked transfer
forward. If the monitoring service is updated after, then the
following can happen:
For a transfer A-B-C where this node is B
- B receives T1 from A and processes it
- B forwards its T2 to C
* B crashes (the monitoring service is not updated)
For the above scenario, the monitoring service would not have the
latest balance proof received by B from A available with the lock
for T1, but C would. If the channel B-C is closed and B does not
come back online in time, the funds for the lock L1 can be lost.
During restarts the rationale from above has to be replicated.
Because the initialization code *is not* the same as the event
handler. This means the balance proof updates must be done prior to
the processing of the message queues.
"""
msg = (
'Transport was started before the monitoring service queue was updated. '
'This can lead to safety issue. node:{self!r}'
)
assert not self.transport, msg
msg = (
'The node state was not yet recovered, cant read balance proofs. node:{self!r}'
)
assert self.wal, msg
current_balance_proofs = views.detect_balance_proof_change(
old_state=ChainState(
pseudo_random_generator=chain_state.pseudo_random_generator,
block_number=GENESIS_BLOCK_NUMBER,
block_hash=constants.EMPTY_HASH,
our_address=chain_state.our_address,
chain_id=chain_state.chain_id,
),
current_state=chain_state,
)
for balance_proof in current_balance_proofs:
update_services_from_balance_proof(self, chain_state, balance_proof) | Send the monitoring requests for all current balance proofs.
Note:
The node must always send the *received* balance proof to the
monitoring service, *before* sending its own locked transfer
forward. If the monitoring service is updated after, then the
following can happen:
For a transfer A-B-C where this node is B
- B receives T1 from A and processes it
- B forwards its T2 to C
* B crashes (the monitoring service is not updated)
For the above scenario, the monitoring service would not have the
latest balance proof received by B from A available with the lock
for T1, but C would. If the channel B-C is closed and B does not
come back online in time, the funds for the lock L1 can be lost.
During restarts the rationale from above has to be replicated.
Because the initialization code *is not* the same as the event
handler. This means the balance proof updates must be done prior to
the processing of the message queues. | Below is the the instruction that describes the task:
### Input:
Send the monitoring requests for all current balance proofs.
Note:
The node must always send the *received* balance proof to the
monitoring service, *before* sending its own locked transfer
forward. If the monitoring service is updated after, then the
following can happen:
For a transfer A-B-C where this node is B
- B receives T1 from A and processes it
- B forwards its T2 to C
* B crashes (the monitoring service is not updated)
For the above scenario, the monitoring service would not have the
latest balance proof received by B from A available with the lock
for T1, but C would. If the channel B-C is closed and B does not
come back online in time, the funds for the lock L1 can be lost.
During restarts the rationale from above has to be replicated.
Because the initialization code *is not* the same as the event
handler. This means the balance proof updates must be done prior to
the processing of the message queues.
### Response:
def _initialize_monitoring_services_queue(self, chain_state: ChainState):
"""Send the monitoring requests for all current balance proofs.
Note:
The node must always send the *received* balance proof to the
monitoring service, *before* sending its own locked transfer
forward. If the monitoring service is updated after, then the
following can happen:
For a transfer A-B-C where this node is B
- B receives T1 from A and processes it
- B forwards its T2 to C
* B crashes (the monitoring service is not updated)
For the above scenario, the monitoring service would not have the
latest balance proof received by B from A available with the lock
for T1, but C would. If the channel B-C is closed and B does not
come back online in time, the funds for the lock L1 can be lost.
During restarts the rationale from above has to be replicated.
Because the initialization code *is not* the same as the event
handler. This means the balance proof updates must be done prior to
the processing of the message queues.
"""
msg = (
'Transport was started before the monitoring service queue was updated. '
'This can lead to safety issue. node:{self!r}'
)
assert not self.transport, msg
msg = (
'The node state was not yet recovered, cant read balance proofs. node:{self!r}'
)
assert self.wal, msg
current_balance_proofs = views.detect_balance_proof_change(
old_state=ChainState(
pseudo_random_generator=chain_state.pseudo_random_generator,
block_number=GENESIS_BLOCK_NUMBER,
block_hash=constants.EMPTY_HASH,
our_address=chain_state.our_address,
chain_id=chain_state.chain_id,
),
current_state=chain_state,
)
for balance_proof in current_balance_proofs:
update_services_from_balance_proof(self, chain_state, balance_proof) |
def feed_backend_arthur(backend_name, backend_params):
""" Feed Ocean with backend data collected from arthur redis queue"""
# Always get pending items from arthur for all data sources
feed_arthur()
logger.debug("Items available for %s", arthur_items.keys())
# Get only the items for the backend
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
backend_cmd = init_backend(klass(*backend_params))
tag = backend_cmd.backend.tag
logger.debug("Getting items for %s.", tag)
if tag in arthur_items:
logger.debug("Found items for %s.", tag)
for item in arthur_items[tag]:
yield item | Feed Ocean with backend data collected from arthur redis queue | Below is the the instruction that describes the task:
### Input:
Feed Ocean with backend data collected from arthur redis queue
### Response:
def feed_backend_arthur(backend_name, backend_params):
""" Feed Ocean with backend data collected from arthur redis queue"""
# Always get pending items from arthur for all data sources
feed_arthur()
logger.debug("Items available for %s", arthur_items.keys())
# Get only the items for the backend
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
backend_cmd = init_backend(klass(*backend_params))
tag = backend_cmd.backend.tag
logger.debug("Getting items for %s.", tag)
if tag in arthur_items:
logger.debug("Found items for %s.", tag)
for item in arthur_items[tag]:
yield item |
def sanitize_html(value, valid_tags=VALID_TAGS, strip=True):
"""
Strips unwanted markup out of HTML.
"""
return bleach.clean(value, tags=list(VALID_TAGS.keys()), attributes=VALID_TAGS, strip=strip) | Strips unwanted markup out of HTML. | Below is the the instruction that describes the task:
### Input:
Strips unwanted markup out of HTML.
### Response:
def sanitize_html(value, valid_tags=VALID_TAGS, strip=True):
"""
Strips unwanted markup out of HTML.
"""
return bleach.clean(value, tags=list(VALID_TAGS.keys()), attributes=VALID_TAGS, strip=strip) |
def write(self, data):
"""
Write `data` to the Stream. Not all data may be written right away.
Use select to find when the stream is writeable, and call do_write()
to flush the internal buffer.
"""
if not data:
return None
self.buffer += data
self.do_write()
return len(data) | Write `data` to the Stream. Not all data may be written right away.
Use select to find when the stream is writeable, and call do_write()
to flush the internal buffer. | Below is the the instruction that describes the task:
### Input:
Write `data` to the Stream. Not all data may be written right away.
Use select to find when the stream is writeable, and call do_write()
to flush the internal buffer.
### Response:
def write(self, data):
"""
Write `data` to the Stream. Not all data may be written right away.
Use select to find when the stream is writeable, and call do_write()
to flush the internal buffer.
"""
if not data:
return None
self.buffer += data
self.do_write()
return len(data) |
def unregister(self, event, callback, selector=None):
"""
Unregisters an event that was being monitored.
:param event: Name of the event to monitor
:param callback: Callback function for when the event is received (Params: event, interface).
:param selector: `(Optional)` CSS selector for the element(s) you want to monitor
"""
self.processor.unregister(event, callback, selector) | Unregisters an event that was being monitored.
:param event: Name of the event to monitor
:param callback: Callback function for when the event is received (Params: event, interface).
:param selector: `(Optional)` CSS selector for the element(s) you want to monitor | Below is the the instruction that describes the task:
### Input:
Unregisters an event that was being monitored.
:param event: Name of the event to monitor
:param callback: Callback function for when the event is received (Params: event, interface).
:param selector: `(Optional)` CSS selector for the element(s) you want to monitor
### Response:
def unregister(self, event, callback, selector=None):
"""
Unregisters an event that was being monitored.
:param event: Name of the event to monitor
:param callback: Callback function for when the event is received (Params: event, interface).
:param selector: `(Optional)` CSS selector for the element(s) you want to monitor
"""
self.processor.unregister(event, callback, selector) |
def verify_env(
dirs,
user,
permissive=False,
pki_dir='',
skip_extra=False,
root_dir=ROOT_DIR):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if salt.utils.platform.is_windows():
return win_verify_env(root_dir,
dirs,
permissive=permissive,
skip_extra=skip_extra)
import pwd # after confirming not running Windows
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.user.get_gid_list(user, include_default=False)
except KeyError:
err = ('Failed to prepare the Salt environment for user '
'{0}. The user is not available.\n').format(user)
sys.stderr.write(err)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
for dir_ in dirs:
if not dir_:
continue
if not os.path.isdir(dir_):
try:
with salt.utils.files.set_umask(0o022):
os.makedirs(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
os.chown(dir_, uid, gid)
except OSError as err:
msg = 'Failed to create directory path "{0}" - {1}\n'
sys.stderr.write(msg.format(dir_, err))
sys.exit(err.errno)
mode = os.stat(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
fmode = os.stat(dir_)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
# Allow the directory to be owned by any group root
# belongs to if we say it's ok to be permissive
pass
else:
# chown the file for the new user
os.chown(dir_, uid, gid)
for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]:
fsubdir = os.path.join(dir_, subdir)
if '{0}jobs'.format(os.path.sep) in fsubdir:
continue
for root, dirs, files in salt.utils.path.os_walk(fsubdir):
for name in files:
if name.startswith('.'):
continue
path = os.path.join(root, name)
try:
fmode = os.stat(path)
except (IOError, OSError):
pass
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
for name in dirs:
path = os.path.join(root, name)
fmode = os.stat(path)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
# Allow the pki dir to be 700 or 750, but nothing else.
# This prevents other users from writing out keys, while
# allowing the use-case of 3rd-party software (like django)
# to read in what it needs to integrate.
#
# If the permissions aren't correct, default to the more secure 700.
# If acls are enabled, the pki_dir needs to remain readable, this
# is still secure because the private keys are still only readable
# by the user running the master
if dir_ == pki_dir:
smode = stat.S_IMODE(mode.st_mode)
if smode != 448 and smode != 488:
if os.access(dir_, os.W_OK):
os.chmod(dir_, 448)
else:
msg = 'Unable to securely set the permissions of "{0}".'
msg = msg.format(dir_)
if is_console_configured():
log.critical(msg)
else:
sys.stderr.write("CRITICAL: {0}\n".format(msg))
if skip_extra is False:
# Run the extra verification checks
zmq_version() | Verify that the named directories are in place and that the environment
can shake the salt | Below is the the instruction that describes the task:
### Input:
Verify that the named directories are in place and that the environment
can shake the salt
### Response:
def verify_env(
dirs,
user,
permissive=False,
pki_dir='',
skip_extra=False,
root_dir=ROOT_DIR):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if salt.utils.platform.is_windows():
return win_verify_env(root_dir,
dirs,
permissive=permissive,
skip_extra=skip_extra)
import pwd # after confirming not running Windows
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.user.get_gid_list(user, include_default=False)
except KeyError:
err = ('Failed to prepare the Salt environment for user '
'{0}. The user is not available.\n').format(user)
sys.stderr.write(err)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
for dir_ in dirs:
if not dir_:
continue
if not os.path.isdir(dir_):
try:
with salt.utils.files.set_umask(0o022):
os.makedirs(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
os.chown(dir_, uid, gid)
except OSError as err:
msg = 'Failed to create directory path "{0}" - {1}\n'
sys.stderr.write(msg.format(dir_, err))
sys.exit(err.errno)
mode = os.stat(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
fmode = os.stat(dir_)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
# Allow the directory to be owned by any group root
# belongs to if we say it's ok to be permissive
pass
else:
# chown the file for the new user
os.chown(dir_, uid, gid)
for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]:
fsubdir = os.path.join(dir_, subdir)
if '{0}jobs'.format(os.path.sep) in fsubdir:
continue
for root, dirs, files in salt.utils.path.os_walk(fsubdir):
for name in files:
if name.startswith('.'):
continue
path = os.path.join(root, name)
try:
fmode = os.stat(path)
except (IOError, OSError):
pass
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
for name in dirs:
path = os.path.join(root, name)
fmode = os.stat(path)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
# Allow the pki dir to be 700 or 750, but nothing else.
# This prevents other users from writing out keys, while
# allowing the use-case of 3rd-party software (like django)
# to read in what it needs to integrate.
#
# If the permissions aren't correct, default to the more secure 700.
# If acls are enabled, the pki_dir needs to remain readable, this
# is still secure because the private keys are still only readable
# by the user running the master
if dir_ == pki_dir:
smode = stat.S_IMODE(mode.st_mode)
if smode != 448 and smode != 488:
if os.access(dir_, os.W_OK):
os.chmod(dir_, 448)
else:
msg = 'Unable to securely set the permissions of "{0}".'
msg = msg.format(dir_)
if is_console_configured():
log.critical(msg)
else:
sys.stderr.write("CRITICAL: {0}\n".format(msg))
if skip_extra is False:
# Run the extra verification checks
zmq_version() |
def ban(self, member, reason=None, *, request_kick=True):
"""
Ban an occupant from re-joining the MUC.
:param member: The occupant to ban.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the banned member.
:type reason: :class:`str`
:param request_kick: A flag indicating that the member should be
removed from the conversation immediately, too.
:type request_kick: :class:`bool`
`request_kick` is supported by MUC, but setting it to false has no
effect: banned members are always immediately kicked.
.. seealso::
:meth:`.AbstractConversation.ban` for the full interface
specification.
"""
if member.direct_jid is None:
raise ValueError(
"cannot ban members whose direct JID is not "
"known")
yield from self.muc_set_affiliation(
member.direct_jid,
"outcast",
reason=reason
) | Ban an occupant from re-joining the MUC.
:param member: The occupant to ban.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the banned member.
:type reason: :class:`str`
:param request_kick: A flag indicating that the member should be
removed from the conversation immediately, too.
:type request_kick: :class:`bool`
`request_kick` is supported by MUC, but setting it to false has no
effect: banned members are always immediately kicked.
.. seealso::
:meth:`.AbstractConversation.ban` for the full interface
specification. | Below is the the instruction that describes the task:
### Input:
Ban an occupant from re-joining the MUC.
:param member: The occupant to ban.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the banned member.
:type reason: :class:`str`
:param request_kick: A flag indicating that the member should be
removed from the conversation immediately, too.
:type request_kick: :class:`bool`
`request_kick` is supported by MUC, but setting it to false has no
effect: banned members are always immediately kicked.
.. seealso::
:meth:`.AbstractConversation.ban` for the full interface
specification.
### Response:
def ban(self, member, reason=None, *, request_kick=True):
"""
Ban an occupant from re-joining the MUC.
:param member: The occupant to ban.
:type member: :class:`Occupant`
:param reason: A reason to show to the members of the conversation
including the banned member.
:type reason: :class:`str`
:param request_kick: A flag indicating that the member should be
removed from the conversation immediately, too.
:type request_kick: :class:`bool`
`request_kick` is supported by MUC, but setting it to false has no
effect: banned members are always immediately kicked.
.. seealso::
:meth:`.AbstractConversation.ban` for the full interface
specification.
"""
if member.direct_jid is None:
raise ValueError(
"cannot ban members whose direct JID is not "
"known")
yield from self.muc_set_affiliation(
member.direct_jid,
"outcast",
reason=reason
) |
def _label(self, line):
'''_label will parse a Dockerfile label
Parameters
==========
line: the line from the recipe file to parse for CMD
'''
label = self._setup('LABEL', line)
self.labels += [ label ] | _label will parse a Dockerfile label
Parameters
==========
line: the line from the recipe file to parse for CMD | Below is the the instruction that describes the task:
### Input:
_label will parse a Dockerfile label
Parameters
==========
line: the line from the recipe file to parse for CMD
### Response:
def _label(self, line):
'''_label will parse a Dockerfile label
Parameters
==========
line: the line from the recipe file to parse for CMD
'''
label = self._setup('LABEL', line)
self.labels += [ label ] |
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes | Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned. | Below is the the instruction that describes the task:
### Input:
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
### Response:
def _getFullPathName(self, containingNodes):
"""
Returns the full node hierarchy rooted at module name.
The list representing the full path through containing nodes
(starting with the module itself) is returned.
"""
assert isinstance(containingNodes, list)
return [(self.options.fullPathNamespace, 'module')] + containingNodes |
def verify_repo_matches_url(repo, url):
"""Verify ``url`` is a part of ``repo``.
We were using ``startswith()`` for a while, which isn't a good comparison.
This function allows us to ``urlparse`` and compare host and path.
Args:
repo (str): the repo url
url (str): the url to verify is part of the repo
Returns:
bool: ``True`` if the repo matches the url.
"""
repo_parts = urlparse(repo)
url_parts = urlparse(url)
errors = []
repo_path_parts = repo_parts.path.split('/')
url_path_parts = url_parts.path.split('/')
if repo_parts.hostname != url_parts.hostname:
errors.append("verify_repo_matches_url: Hostnames don't match! {} {}".format(
repo_parts.hostname, url_parts.hostname
))
if not url_parts.path.startswith(repo_parts.path) or \
url_path_parts[:len(repo_path_parts)] != repo_path_parts:
errors.append("verify_repo_matches_url: Paths don't match! {} {}".format(
repo_parts.path, url_parts.path
))
if errors:
log.warning("\n".join(errors))
return False
return True | Verify ``url`` is a part of ``repo``.
We were using ``startswith()`` for a while, which isn't a good comparison.
This function allows us to ``urlparse`` and compare host and path.
Args:
repo (str): the repo url
url (str): the url to verify is part of the repo
Returns:
bool: ``True`` if the repo matches the url. | Below is the the instruction that describes the task:
### Input:
Verify ``url`` is a part of ``repo``.
We were using ``startswith()`` for a while, which isn't a good comparison.
This function allows us to ``urlparse`` and compare host and path.
Args:
repo (str): the repo url
url (str): the url to verify is part of the repo
Returns:
bool: ``True`` if the repo matches the url.
### Response:
def verify_repo_matches_url(repo, url):
"""Verify ``url`` is a part of ``repo``.
We were using ``startswith()`` for a while, which isn't a good comparison.
This function allows us to ``urlparse`` and compare host and path.
Args:
repo (str): the repo url
url (str): the url to verify is part of the repo
Returns:
bool: ``True`` if the repo matches the url.
"""
repo_parts = urlparse(repo)
url_parts = urlparse(url)
errors = []
repo_path_parts = repo_parts.path.split('/')
url_path_parts = url_parts.path.split('/')
if repo_parts.hostname != url_parts.hostname:
errors.append("verify_repo_matches_url: Hostnames don't match! {} {}".format(
repo_parts.hostname, url_parts.hostname
))
if not url_parts.path.startswith(repo_parts.path) or \
url_path_parts[:len(repo_path_parts)] != repo_path_parts:
errors.append("verify_repo_matches_url: Paths don't match! {} {}".format(
repo_parts.path, url_parts.path
))
if errors:
log.warning("\n".join(errors))
return False
return True |
def print_diff(diff, color=True):
"""Pretty printing for a diff, if color then we use a simple color scheme
(red for removed lines, green for added lines)."""
import colorama
if not diff:
return
if not color:
colorama.init = lambda autoreset: None
colorama.Fore.RED = ''
colorama.Back.RED = ''
colorama.Fore.GREEN = ''
colorama.deinit = lambda: None
colorama.init(autoreset=True) # TODO use context_manager
for line in diff.splitlines():
if line.startswith('+') and not line.startswith('+++ '):
# Note there shouldn't be trailing whitespace
# but may be nice to generalise this
print(colorama.Fore.GREEN + line)
elif line.startswith('-') and not line.startswith('--- '):
split_whitespace = re.split('(\s+)$', line)
if len(split_whitespace) > 1: # claim it must be 3
line, trailing, _ = split_whitespace
else:
line, trailing = split_whitespace[0], ''
print(colorama.Fore.RED + line, end='')
# give trailing whitespace a RED background
print(colorama.Back.RED + trailing)
elif line == '\ No newline at end of file':
# The assumption here is that there is now a new line...
print(colorama.Fore.RED + line)
else:
print(line)
colorama.deinit() | Pretty printing for a diff, if color then we use a simple color scheme
(red for removed lines, green for added lines). | Below is the the instruction that describes the task:
### Input:
Pretty printing for a diff, if color then we use a simple color scheme
(red for removed lines, green for added lines).
### Response:
def print_diff(diff, color=True):
"""Pretty printing for a diff, if color then we use a simple color scheme
(red for removed lines, green for added lines)."""
import colorama
if not diff:
return
if not color:
colorama.init = lambda autoreset: None
colorama.Fore.RED = ''
colorama.Back.RED = ''
colorama.Fore.GREEN = ''
colorama.deinit = lambda: None
colorama.init(autoreset=True) # TODO use context_manager
for line in diff.splitlines():
if line.startswith('+') and not line.startswith('+++ '):
# Note there shouldn't be trailing whitespace
# but may be nice to generalise this
print(colorama.Fore.GREEN + line)
elif line.startswith('-') and not line.startswith('--- '):
split_whitespace = re.split('(\s+)$', line)
if len(split_whitespace) > 1: # claim it must be 3
line, trailing, _ = split_whitespace
else:
line, trailing = split_whitespace[0], ''
print(colorama.Fore.RED + line, end='')
# give trailing whitespace a RED background
print(colorama.Back.RED + trailing)
elif line == '\ No newline at end of file':
# The assumption here is that there is now a new line...
print(colorama.Fore.RED + line)
else:
print(line)
colorama.deinit() |
def update_elements(self, line, column, charcount, docdelta=0):
"""Updates all the element instances that are children of this module
to have new start and end charindex values based on an operation that
was performed on the module source code.
:arg line: the line number of the *start* of the operation.
:arg column: the column number of the start of the operation.
:arg charcount: the total character length change from the operation.
:arg docdelta: the character length of changes made to types/execs
that are children of the module whose docstrings external to their
definitions were changed.
"""
target = self.charindex(line, column) + charcount
#We are looking for all the instances whose *start* attribute lies
#after this target. Then we update them all by that amount.
#However, we need to be careful because of docdelta. If an elements
#docstring contains the target, we don't want to update it.
if line < self.contains_index:
for t in self.types:
if self._update_char_check(self.types[t], target, docdelta):
self._element_charfix(self.types[t], charcount)
for m in self.members:
if self.members[m].start > target:
self.members[m].start += charcount
self.members[m].end += charcount
self._contains_index = None
else:
for iexec in self.executables:
if self._update_char_check(self.executables[iexec], target, docdelta):
self._element_charfix(self.executables[iexec], charcount) | Updates all the element instances that are children of this module
to have new start and end charindex values based on an operation that
was performed on the module source code.
:arg line: the line number of the *start* of the operation.
:arg column: the column number of the start of the operation.
:arg charcount: the total character length change from the operation.
:arg docdelta: the character length of changes made to types/execs
that are children of the module whose docstrings external to their
definitions were changed. | Below is the the instruction that describes the task:
### Input:
Updates all the element instances that are children of this module
to have new start and end charindex values based on an operation that
was performed on the module source code.
:arg line: the line number of the *start* of the operation.
:arg column: the column number of the start of the operation.
:arg charcount: the total character length change from the operation.
:arg docdelta: the character length of changes made to types/execs
that are children of the module whose docstrings external to their
definitions were changed.
### Response:
def update_elements(self, line, column, charcount, docdelta=0):
"""Updates all the element instances that are children of this module
to have new start and end charindex values based on an operation that
was performed on the module source code.
:arg line: the line number of the *start* of the operation.
:arg column: the column number of the start of the operation.
:arg charcount: the total character length change from the operation.
:arg docdelta: the character length of changes made to types/execs
that are children of the module whose docstrings external to their
definitions were changed.
"""
target = self.charindex(line, column) + charcount
#We are looking for all the instances whose *start* attribute lies
#after this target. Then we update them all by that amount.
#However, we need to be careful because of docdelta. If an elements
#docstring contains the target, we don't want to update it.
if line < self.contains_index:
for t in self.types:
if self._update_char_check(self.types[t], target, docdelta):
self._element_charfix(self.types[t], charcount)
for m in self.members:
if self.members[m].start > target:
self.members[m].start += charcount
self.members[m].end += charcount
self._contains_index = None
else:
for iexec in self.executables:
if self._update_char_check(self.executables[iexec], target, docdelta):
self._element_charfix(self.executables[iexec], charcount) |
def slice_to_SLICE(sliceVals, width):
"""convert python slice to value of SLICE hdl type"""
if sliceVals.step is not None:
raise NotImplementedError()
start = sliceVals.start
stop = sliceVals.stop
if sliceVals.start is None:
start = INT.fromPy(width)
else:
start = toHVal(sliceVals.start)
if sliceVals.stop is None:
stop = INT.fromPy(0)
else:
stop = toHVal(sliceVals.stop)
startIsVal = isinstance(start, Value)
stopIsVal = isinstance(stop, Value)
indexesAreValues = startIsVal and stopIsVal
if indexesAreValues:
updateTime = max(start.updateTime, stop.updateTime)
else:
updateTime = -1
return Slice.getValueCls()((start, stop), SLICE, 1, updateTime) | convert python slice to value of SLICE hdl type | Below is the the instruction that describes the task:
### Input:
convert python slice to value of SLICE hdl type
### Response:
def slice_to_SLICE(sliceVals, width):
"""convert python slice to value of SLICE hdl type"""
if sliceVals.step is not None:
raise NotImplementedError()
start = sliceVals.start
stop = sliceVals.stop
if sliceVals.start is None:
start = INT.fromPy(width)
else:
start = toHVal(sliceVals.start)
if sliceVals.stop is None:
stop = INT.fromPy(0)
else:
stop = toHVal(sliceVals.stop)
startIsVal = isinstance(start, Value)
stopIsVal = isinstance(stop, Value)
indexesAreValues = startIsVal and stopIsVal
if indexesAreValues:
updateTime = max(start.updateTime, stop.updateTime)
else:
updateTime = -1
return Slice.getValueCls()((start, stop), SLICE, 1, updateTime) |
def find_period(y, Tapprox):
""" Find oscillation period of y(t).
Parameter Tapprox is the approximate period. The code finds the time
between 0.7 * Tapprox and 1.3 * Tapprox where y(t)[1] = d/dt theta(t)
vanishes. This is the period.
"""
def dtheta_dt(t):
""" vanishes when dtheta/dt = 0 """
return y(t)[1]
return gv.root.refine(dtheta_dt, (0.7 * Tapprox, 1.3 * Tapprox)) | Find oscillation period of y(t).
Parameter Tapprox is the approximate period. The code finds the time
between 0.7 * Tapprox and 1.3 * Tapprox where y(t)[1] = d/dt theta(t)
vanishes. This is the period. | Below is the the instruction that describes the task:
### Input:
Find oscillation period of y(t).
Parameter Tapprox is the approximate period. The code finds the time
between 0.7 * Tapprox and 1.3 * Tapprox where y(t)[1] = d/dt theta(t)
vanishes. This is the period.
### Response:
def find_period(y, Tapprox):
""" Find oscillation period of y(t).
Parameter Tapprox is the approximate period. The code finds the time
between 0.7 * Tapprox and 1.3 * Tapprox where y(t)[1] = d/dt theta(t)
vanishes. This is the period.
"""
def dtheta_dt(t):
""" vanishes when dtheta/dt = 0 """
return y(t)[1]
return gv.root.refine(dtheta_dt, (0.7 * Tapprox, 1.3 * Tapprox)) |
def _erfc(a):
"""
Port of cephes ``ndtr.c`` ``erfc`` function.
See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c
"""
# approximation for abs(a) < 8 and abs(a) >= 1
P = [
2.46196981473530512524E-10,
5.64189564831068821977E-1,
7.46321056442269912687E0,
4.86371970985681366614E1,
1.96520832956077098242E2,
5.26445194995477358631E2,
9.34528527171957607540E2,
1.02755188689515710272E3,
5.57535335369399327526E2,
]
Q = [
1.32281951154744992508E1,
8.67072140885989742329E1,
3.54937778887819891062E2,
9.75708501743205489753E2,
1.82390916687909736289E3,
2.24633760818710981792E3,
1.65666309194161350182E3,
5.57535340817727675546E2,
]
# approximation for abs(a) >= 8
R = [
5.64189583547755073984E-1,
1.27536670759978104416E0,
5.01905042251180477414E0,
6.16021097993053585195E0,
7.40974269950448939160E0,
2.97886665372100240670E0,
]
S = [
2.26052863220117276590E0,
9.39603524938001434673E0,
1.20489539808096656605E1,
1.70814450747565897222E1,
9.60896809063285878198E0,
3.36907645100081516050E0,
]
# Shortcut special cases
if a == 0:
return 1
if a >= MAXVAL:
return 0
if a <= -MAXVAL:
return 2
x = a
if a < 0:
x = -a
# computationally cheaper to calculate erf for small values, I guess.
if x < 1:
return 1 - erf(a)
z = -a * a
z = math.exp(z)
if x < 8:
p = _polevl(x, P, 8)
q = _p1evl(x, Q, 8)
else:
p = _polevl(x, R, 5)
q = _p1evl(x, S, 6)
y = (z * p) / q
if a < 0:
y = 2 - y
return y | Port of cephes ``ndtr.c`` ``erfc`` function.
See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c | Below is the the instruction that describes the task:
### Input:
Port of cephes ``ndtr.c`` ``erfc`` function.
See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c
### Response:
def _erfc(a):
"""
Port of cephes ``ndtr.c`` ``erfc`` function.
See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c
"""
# approximation for abs(a) < 8 and abs(a) >= 1
P = [
2.46196981473530512524E-10,
5.64189564831068821977E-1,
7.46321056442269912687E0,
4.86371970985681366614E1,
1.96520832956077098242E2,
5.26445194995477358631E2,
9.34528527171957607540E2,
1.02755188689515710272E3,
5.57535335369399327526E2,
]
Q = [
1.32281951154744992508E1,
8.67072140885989742329E1,
3.54937778887819891062E2,
9.75708501743205489753E2,
1.82390916687909736289E3,
2.24633760818710981792E3,
1.65666309194161350182E3,
5.57535340817727675546E2,
]
# approximation for abs(a) >= 8
R = [
5.64189583547755073984E-1,
1.27536670759978104416E0,
5.01905042251180477414E0,
6.16021097993053585195E0,
7.40974269950448939160E0,
2.97886665372100240670E0,
]
S = [
2.26052863220117276590E0,
9.39603524938001434673E0,
1.20489539808096656605E1,
1.70814450747565897222E1,
9.60896809063285878198E0,
3.36907645100081516050E0,
]
# Shortcut special cases
if a == 0:
return 1
if a >= MAXVAL:
return 0
if a <= -MAXVAL:
return 2
x = a
if a < 0:
x = -a
# computationally cheaper to calculate erf for small values, I guess.
if x < 1:
return 1 - erf(a)
z = -a * a
z = math.exp(z)
if x < 8:
p = _polevl(x, P, 8)
q = _p1evl(x, Q, 8)
else:
p = _polevl(x, R, 5)
q = _p1evl(x, S, 6)
y = (z * p) / q
if a < 0:
y = 2 - y
return y |
def gameloop(self):
"""
Loop through all the necessary stuff and end execution when Ctrl+C was hit.
"""
try:
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.event.post(pygame.event.Event(pygame.QUIT))
self.recv_data()
self.update()
self.render()
except KeyboardInterrupt:
pass | Loop through all the necessary stuff and end execution when Ctrl+C was hit. | Below is the the instruction that describes the task:
### Input:
Loop through all the necessary stuff and end execution when Ctrl+C was hit.
### Response:
def gameloop(self):
"""
Loop through all the necessary stuff and end execution when Ctrl+C was hit.
"""
try:
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.event.post(pygame.event.Event(pygame.QUIT))
self.recv_data()
self.update()
self.render()
except KeyboardInterrupt:
pass |
def clips(self, **kwargs):
"""Get related clips and trailers for a movie specified by id
from the API.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('clips')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get related clips and trailers for a movie specified by id
from the API.
Returns:
A dict respresentation of the JSON returned from the API. | Below is the the instruction that describes the task:
### Input:
Get related clips and trailers for a movie specified by id
from the API.
Returns:
A dict respresentation of the JSON returned from the API.
### Response:
def clips(self, **kwargs):
"""Get related clips and trailers for a movie specified by id
from the API.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('clips')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response |
def from_env(parser_modules: t.Optional[t.Union[t.List[str], t.Tuple[str]]] = DEFAULT_PARSER_MODULES,
env: t.Optional[t.Dict[str, str]] = None,
silent: bool = False,
suppress_logs: bool = False,
extra: t.Optional[dict] = None) -> 'ConfigLoader':
"""
Creates an instance of :class:`~django_docker_helpers.config.ConfigLoader`
with parsers initialized from environment variables.
By default it tries to initialize all bundled parsers.
Parsers may be customized with ``parser_modules`` argument or ``CONFIG__PARSERS`` environment variable.
Environment variable has a priority over the method argument.
:param parser_modules: a list of dot-separated module paths
:param env: a dict with environment variables, default is ``os.environ``
:param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param extra: pass extra arguments to *every* parser
:return: an instance of :class:`~django_docker_helpers.config.ConfigLoader`
Example:
::
env = {
'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser',
'ENVIRONMENTPARSER__SCOPE': 'nested',
'YAMLPARSER__CONFIG': './tests/data/config.yml',
'REDISPARSER__HOST': 'wtf.test',
'NESTED__VARIABLE': 'i_am_here',
}
loader = ConfigLoader.from_env(env=env)
assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser]
assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader'
loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={})
"""
env = env or os.environ
extra = extra or {}
environment_parser = EnvironmentParser(scope='config', env=env)
silent = environment_parser.get('silent', silent, coerce_type=bool)
suppress_logs = environment_parser.get('suppress_logs', suppress_logs, coerce_type=bool)
env_parsers = environment_parser.get('parsers', None, coercer=comma_str_to_list)
if not env_parsers and not parser_modules:
raise ValueError('Must specify `CONFIG__PARSERS` env var or `parser_modules`')
if env_parsers:
parser_classes = ConfigLoader.import_parsers(env_parsers)
else:
parser_classes = ConfigLoader.import_parsers(parser_modules)
parsers = []
for parser_class in parser_classes:
parser_options = ConfigLoader.load_parser_options_from_env(parser_class, env=env)
_init_args = inspect.getfullargspec(parser_class.__init__).args
# add extra args if parser's __init__ can take it it
if 'env' in _init_args:
parser_options['env'] = env
for k, v in extra.items():
if k in _init_args:
parser_options[k] = v
parser_instance = parser_class(**parser_options)
parsers.append(parser_instance)
return ConfigLoader(parsers=parsers, silent=silent, suppress_logs=suppress_logs) | Creates an instance of :class:`~django_docker_helpers.config.ConfigLoader`
with parsers initialized from environment variables.
By default it tries to initialize all bundled parsers.
Parsers may be customized with ``parser_modules`` argument or ``CONFIG__PARSERS`` environment variable.
Environment variable has a priority over the method argument.
:param parser_modules: a list of dot-separated module paths
:param env: a dict with environment variables, default is ``os.environ``
:param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param extra: pass extra arguments to *every* parser
:return: an instance of :class:`~django_docker_helpers.config.ConfigLoader`
Example:
::
env = {
'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser',
'ENVIRONMENTPARSER__SCOPE': 'nested',
'YAMLPARSER__CONFIG': './tests/data/config.yml',
'REDISPARSER__HOST': 'wtf.test',
'NESTED__VARIABLE': 'i_am_here',
}
loader = ConfigLoader.from_env(env=env)
assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser]
assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader'
loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={}) | Below is the the instruction that describes the task:
### Input:
Creates an instance of :class:`~django_docker_helpers.config.ConfigLoader`
with parsers initialized from environment variables.
By default it tries to initialize all bundled parsers.
Parsers may be customized with ``parser_modules`` argument or ``CONFIG__PARSERS`` environment variable.
Environment variable has a priority over the method argument.
:param parser_modules: a list of dot-separated module paths
:param env: a dict with environment variables, default is ``os.environ``
:param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param extra: pass extra arguments to *every* parser
:return: an instance of :class:`~django_docker_helpers.config.ConfigLoader`
Example:
::
env = {
'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser',
'ENVIRONMENTPARSER__SCOPE': 'nested',
'YAMLPARSER__CONFIG': './tests/data/config.yml',
'REDISPARSER__HOST': 'wtf.test',
'NESTED__VARIABLE': 'i_am_here',
}
loader = ConfigLoader.from_env(env=env)
assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser]
assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader'
loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={})
### Response:
def from_env(parser_modules: t.Optional[t.Union[t.List[str], t.Tuple[str]]] = DEFAULT_PARSER_MODULES,
env: t.Optional[t.Dict[str, str]] = None,
silent: bool = False,
suppress_logs: bool = False,
extra: t.Optional[dict] = None) -> 'ConfigLoader':
"""
Creates an instance of :class:`~django_docker_helpers.config.ConfigLoader`
with parsers initialized from environment variables.
By default it tries to initialize all bundled parsers.
Parsers may be customized with ``parser_modules`` argument or ``CONFIG__PARSERS`` environment variable.
Environment variable has a priority over the method argument.
:param parser_modules: a list of dot-separated module paths
:param env: a dict with environment variables, default is ``os.environ``
:param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param extra: pass extra arguments to *every* parser
:return: an instance of :class:`~django_docker_helpers.config.ConfigLoader`
Example:
::
env = {
'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser',
'ENVIRONMENTPARSER__SCOPE': 'nested',
'YAMLPARSER__CONFIG': './tests/data/config.yml',
'REDISPARSER__HOST': 'wtf.test',
'NESTED__VARIABLE': 'i_am_here',
}
loader = ConfigLoader.from_env(env=env)
assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser]
assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader'
loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={})
"""
env = env or os.environ
extra = extra or {}
environment_parser = EnvironmentParser(scope='config', env=env)
silent = environment_parser.get('silent', silent, coerce_type=bool)
suppress_logs = environment_parser.get('suppress_logs', suppress_logs, coerce_type=bool)
env_parsers = environment_parser.get('parsers', None, coercer=comma_str_to_list)
if not env_parsers and not parser_modules:
raise ValueError('Must specify `CONFIG__PARSERS` env var or `parser_modules`')
if env_parsers:
parser_classes = ConfigLoader.import_parsers(env_parsers)
else:
parser_classes = ConfigLoader.import_parsers(parser_modules)
parsers = []
for parser_class in parser_classes:
parser_options = ConfigLoader.load_parser_options_from_env(parser_class, env=env)
_init_args = inspect.getfullargspec(parser_class.__init__).args
# add extra args if parser's __init__ can take it it
if 'env' in _init_args:
parser_options['env'] = env
for k, v in extra.items():
if k in _init_args:
parser_options[k] = v
parser_instance = parser_class(**parser_options)
parsers.append(parser_instance)
return ConfigLoader(parsers=parsers, silent=silent, suppress_logs=suppress_logs) |
def yesterday(self) -> datetime:
""" Set the value to yesterday """
self.value = datetime.today() - timedelta(days=1)
return self.value | Set the value to yesterday | Below is the the instruction that describes the task:
### Input:
Set the value to yesterday
### Response:
def yesterday(self) -> datetime:
""" Set the value to yesterday """
self.value = datetime.today() - timedelta(days=1)
return self.value |
def remove_group(self, group):
"""
Remove the specified group.
"""
if not isinstance(group, Group):
raise TypeError("group must be Group")
if group not in self.groups:
raise ValueError("Group doesn't exist / is not bound to this database.")
#save num entries and children before removal to avoid for loop problems
num_entries = len(group.entries)
for i in xrange(num_entries):
self.remove_entry(group.entries[0])
# Recurse down to remove sub-groups
num_children = len(group.children)
for i in xrange(num_children): # We may need to copy this to avoid CME (see below)
self.remove_group(group.children[0])
# Finally remove group from the parent's list.
group.parent.children.remove(group) # Concurrent modification exception? Parent in recursive stack is iterating ...
self.groups.remove(group) | Remove the specified group. | Below is the the instruction that describes the task:
### Input:
Remove the specified group.
### Response:
def remove_group(self, group):
"""
Remove the specified group.
"""
if not isinstance(group, Group):
raise TypeError("group must be Group")
if group not in self.groups:
raise ValueError("Group doesn't exist / is not bound to this database.")
#save num entries and children before removal to avoid for loop problems
num_entries = len(group.entries)
for i in xrange(num_entries):
self.remove_entry(group.entries[0])
# Recurse down to remove sub-groups
num_children = len(group.children)
for i in xrange(num_children): # We may need to copy this to avoid CME (see below)
self.remove_group(group.children[0])
# Finally remove group from the parent's list.
group.parent.children.remove(group) # Concurrent modification exception? Parent in recursive stack is iterating ...
self.groups.remove(group) |
def to_cloudformation(self, **kwargs):
"""Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds.
:param dict kwargs: no existing resources need to be modified
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list
"""
function = kwargs.get('function')
if not function:
raise TypeError("Missing required keyword argument: function")
resources = []
events_rule = EventsRule(self.logical_id)
resources.append(events_rule)
events_rule.ScheduleExpression = self.Schedule
events_rule.Targets = [self._construct_target(function)]
source_arn = events_rule.get_runtime_attr("arn")
if CONDITION in function.resource_attributes:
events_rule.set_resource_attribute(CONDITION, function.resource_attributes[CONDITION])
resources.append(self._construct_permission(function, source_arn=source_arn))
return resources | Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds.
:param dict kwargs: no existing resources need to be modified
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list | Below is the the instruction that describes the task:
### Input:
Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds.
:param dict kwargs: no existing resources need to be modified
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list
### Response:
def to_cloudformation(self, **kwargs):
"""Returns the CloudWatch Events Rule and Lambda Permission to which this Schedule event source corresponds.
:param dict kwargs: no existing resources need to be modified
:returns: a list of vanilla CloudFormation Resources, to which this pull event expands
:rtype: list
"""
function = kwargs.get('function')
if not function:
raise TypeError("Missing required keyword argument: function")
resources = []
events_rule = EventsRule(self.logical_id)
resources.append(events_rule)
events_rule.ScheduleExpression = self.Schedule
events_rule.Targets = [self._construct_target(function)]
source_arn = events_rule.get_runtime_attr("arn")
if CONDITION in function.resource_attributes:
events_rule.set_resource_attribute(CONDITION, function.resource_attributes[CONDITION])
resources.append(self._construct_permission(function, source_arn=source_arn))
return resources |
def flatten(items):
"""
Yield items from any nested iterable.
>>> list(flatten([[1, 2, 3], [[4, 5], 6, 7]]))
[1, 2, 3, 4, 5, 6, 7]
"""
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
for sub_x in flatten(x):
yield sub_x
else:
yield x | Yield items from any nested iterable.
>>> list(flatten([[1, 2, 3], [[4, 5], 6, 7]]))
[1, 2, 3, 4, 5, 6, 7] | Below is the the instruction that describes the task:
### Input:
Yield items from any nested iterable.
>>> list(flatten([[1, 2, 3], [[4, 5], 6, 7]]))
[1, 2, 3, 4, 5, 6, 7]
### Response:
def flatten(items):
"""
Yield items from any nested iterable.
>>> list(flatten([[1, 2, 3], [[4, 5], 6, 7]]))
[1, 2, 3, 4, 5, 6, 7]
"""
for x in items:
if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
for sub_x in flatten(x):
yield sub_x
else:
yield x |
def all_thumbnails(path, recursive=True, prefix=None, subdir=None):
"""
Return a dictionary referencing all files which match the thumbnail format.
Each key is a source image filename, relative to path.
Each value is a list of dictionaries as explained in `thumbnails_for_file`.
"""
if prefix is None:
prefix = settings.THUMBNAIL_PREFIX
if subdir is None:
subdir = settings.THUMBNAIL_SUBDIR
thumbnail_files = {}
if not path.endswith('/'):
path = '%s/' % path
len_path = len(path)
if recursive:
all = os.walk(path)
else:
files = []
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
files.append(file)
all = [(path, [], files)]
for dir_, subdirs, files in all:
rel_dir = dir_[len_path:]
for file in files:
thumb = re_thumbnail_file.match(file)
if not thumb:
continue
d = thumb.groupdict()
source_filename = d.pop('source_filename')
if prefix:
source_path, source_filename = os.path.split(source_filename)
if not source_filename.startswith(prefix):
continue
source_filename = os.path.join(
source_path, source_filename[len(prefix):])
d['options'] = d['options'] and d['options'].split('_') or []
if subdir and rel_dir.endswith(subdir):
rel_dir = rel_dir[:-len(subdir)]
# Corner-case bug: if the filename didn't have an extension but did
# have an underscore, the last underscore will get converted to a
# '.'.
m = re.match(r'(.*)_(.*)', source_filename)
if m:
source_filename = '%s.%s' % m.groups()
filename = os.path.join(rel_dir, source_filename)
thumbnail_file = thumbnail_files.setdefault(filename, [])
d['filename'] = os.path.join(dir_, file)
thumbnail_file.append(d)
return thumbnail_files | Return a dictionary referencing all files which match the thumbnail format.
Each key is a source image filename, relative to path.
Each value is a list of dictionaries as explained in `thumbnails_for_file`. | Below is the the instruction that describes the task:
### Input:
Return a dictionary referencing all files which match the thumbnail format.
Each key is a source image filename, relative to path.
Each value is a list of dictionaries as explained in `thumbnails_for_file`.
### Response:
def all_thumbnails(path, recursive=True, prefix=None, subdir=None):
"""
Return a dictionary referencing all files which match the thumbnail format.
Each key is a source image filename, relative to path.
Each value is a list of dictionaries as explained in `thumbnails_for_file`.
"""
if prefix is None:
prefix = settings.THUMBNAIL_PREFIX
if subdir is None:
subdir = settings.THUMBNAIL_SUBDIR
thumbnail_files = {}
if not path.endswith('/'):
path = '%s/' % path
len_path = len(path)
if recursive:
all = os.walk(path)
else:
files = []
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
files.append(file)
all = [(path, [], files)]
for dir_, subdirs, files in all:
rel_dir = dir_[len_path:]
for file in files:
thumb = re_thumbnail_file.match(file)
if not thumb:
continue
d = thumb.groupdict()
source_filename = d.pop('source_filename')
if prefix:
source_path, source_filename = os.path.split(source_filename)
if not source_filename.startswith(prefix):
continue
source_filename = os.path.join(
source_path, source_filename[len(prefix):])
d['options'] = d['options'] and d['options'].split('_') or []
if subdir and rel_dir.endswith(subdir):
rel_dir = rel_dir[:-len(subdir)]
# Corner-case bug: if the filename didn't have an extension but did
# have an underscore, the last underscore will get converted to a
# '.'.
m = re.match(r'(.*)_(.*)', source_filename)
if m:
source_filename = '%s.%s' % m.groups()
filename = os.path.join(rel_dir, source_filename)
thumbnail_file = thumbnail_files.setdefault(filename, [])
d['filename'] = os.path.join(dir_, file)
thumbnail_file.append(d)
return thumbnail_files |
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf) | Concatenation that escapes if necessary and converts to unicode. | Below is the the instruction that describes the task:
### Input:
Concatenation that escapes if necessary and converts to unicode.
### Response:
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf) |
def main(name, options):
"""The main method for this script.
:param name: The name of the VM to create.
:type name: str
:param template_name: The name of the template to use for creating \
the VM.
:type template_name: str
"""
server = config._config_value("general", "server", options.server)
if server is None:
raise ValueError("server must be supplied on command line"
" or in configuration file.")
username = config._config_value("general", "username", options.username)
if username is None:
raise ValueError("username must be supplied on command line"
" or in configuration file.")
password = config._config_value("general", "password", options.password)
if password is None:
raise ValueError("password must be supplied on command line"
" or in configuration file.")
vm_template = None
if options.template is not None:
try:
vm_template = template.load_template(options.template)
except TemplateNotFoundError:
print("ERROR: Template \"%s\" could not be found." % options.template)
sys.exit(1)
expected_opts = ["compute_resource", "datastore", "disksize", "nics",
"memory", "num_cpus", "guest_id", "host"]
vm_opts = {}
for opt in expected_opts:
vm_opts[opt] = getattr(options, opt)
if vm_opts[opt] is None:
if vm_template is None:
raise ValueError("%s not specified on the command line and"
" you have not specified any template to"
" inherit the value from." % opt)
try:
vm_opts[opt] = vm_template[opt]
except AttributeError:
raise ValueError("%s not specified on the command line and"
" no value is provided in the specified"
" template." % opt)
client = Client(server=server, username=username, password=password)
create_vm(client, name, vm_opts["compute_resource"], vm_opts["datastore"],
vm_opts["disksize"], vm_opts["nics"], vm_opts["memory"],
vm_opts["num_cpus"], vm_opts["guest_id"], host=vm_opts["host"])
client.logout() | The main method for this script.
:param name: The name of the VM to create.
:type name: str
:param template_name: The name of the template to use for creating \
the VM.
:type template_name: str | Below is the the instruction that describes the task:
### Input:
The main method for this script.
:param name: The name of the VM to create.
:type name: str
:param template_name: The name of the template to use for creating \
the VM.
:type template_name: str
### Response:
def main(name, options):
"""The main method for this script.
:param name: The name of the VM to create.
:type name: str
:param template_name: The name of the template to use for creating \
the VM.
:type template_name: str
"""
server = config._config_value("general", "server", options.server)
if server is None:
raise ValueError("server must be supplied on command line"
" or in configuration file.")
username = config._config_value("general", "username", options.username)
if username is None:
raise ValueError("username must be supplied on command line"
" or in configuration file.")
password = config._config_value("general", "password", options.password)
if password is None:
raise ValueError("password must be supplied on command line"
" or in configuration file.")
vm_template = None
if options.template is not None:
try:
vm_template = template.load_template(options.template)
except TemplateNotFoundError:
print("ERROR: Template \"%s\" could not be found." % options.template)
sys.exit(1)
expected_opts = ["compute_resource", "datastore", "disksize", "nics",
"memory", "num_cpus", "guest_id", "host"]
vm_opts = {}
for opt in expected_opts:
vm_opts[opt] = getattr(options, opt)
if vm_opts[opt] is None:
if vm_template is None:
raise ValueError("%s not specified on the command line and"
" you have not specified any template to"
" inherit the value from." % opt)
try:
vm_opts[opt] = vm_template[opt]
except AttributeError:
raise ValueError("%s not specified on the command line and"
" no value is provided in the specified"
" template." % opt)
client = Client(server=server, username=username, password=password)
create_vm(client, name, vm_opts["compute_resource"], vm_opts["datastore"],
vm_opts["disksize"], vm_opts["nics"], vm_opts["memory"],
vm_opts["num_cpus"], vm_opts["guest_id"], host=vm_opts["host"])
client.logout() |
def a_send_username(username, ctx):
"""Sent the username text."""
if username:
ctx.ctrl.sendline(username)
return True
else:
ctx.ctrl.disconnect()
raise ConnectionAuthenticationError("Username not provided", ctx.ctrl.hostname) | Sent the username text. | Below is the the instruction that describes the task:
### Input:
Sent the username text.
### Response:
def a_send_username(username, ctx):
"""Sent the username text."""
if username:
ctx.ctrl.sendline(username)
return True
else:
ctx.ctrl.disconnect()
raise ConnectionAuthenticationError("Username not provided", ctx.ctrl.hostname) |
def dump(self, file_name, page_size=10, vtimeout=10, sep='\n'):
"""Utility function to dump the messages in a queue to a file
NOTE: Page size must be < 10 else SQS errors"""
fp = open(file_name, 'wb')
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
fp.write(m.get_body())
if sep:
fp.write(sep)
n += 1
l = self.get_messages(page_size, vtimeout)
fp.close()
return n | Utility function to dump the messages in a queue to a file
NOTE: Page size must be < 10 else SQS errors | Below is the the instruction that describes the task:
### Input:
Utility function to dump the messages in a queue to a file
NOTE: Page size must be < 10 else SQS errors
### Response:
def dump(self, file_name, page_size=10, vtimeout=10, sep='\n'):
"""Utility function to dump the messages in a queue to a file
NOTE: Page size must be < 10 else SQS errors"""
fp = open(file_name, 'wb')
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
fp.write(m.get_body())
if sep:
fp.write(sep)
n += 1
l = self.get_messages(page_size, vtimeout)
fp.close()
return n |
def __reorganize_funding(self):
"""
Funding gets added to noaa_data_sorted with LiPD keys. Change those keys to NOAA
:return none:
"""
_map = {"agency": "Funding_Agency_Name", "grant": "Grant"}
try:
_l = []
for item in self.noaa_data_sorted["Funding_Agency"]:
_tmp = {}
for lpd_name, noaa_name in _map.items():
val = ""
if lpd_name in item:
val = item[lpd_name]
_tmp[noaa_name] = val
_l.append(_tmp)
self.noaa_data_sorted["Funding_Agency"] = _l
except Exception:
pass
return | Funding gets added to noaa_data_sorted with LiPD keys. Change those keys to NOAA
:return none: | Below is the the instruction that describes the task:
### Input:
Funding gets added to noaa_data_sorted with LiPD keys. Change those keys to NOAA
:return none:
### Response:
def __reorganize_funding(self):
"""
Funding gets added to noaa_data_sorted with LiPD keys. Change those keys to NOAA
:return none:
"""
_map = {"agency": "Funding_Agency_Name", "grant": "Grant"}
try:
_l = []
for item in self.noaa_data_sorted["Funding_Agency"]:
_tmp = {}
for lpd_name, noaa_name in _map.items():
val = ""
if lpd_name in item:
val = item[lpd_name]
_tmp[noaa_name] = val
_l.append(_tmp)
self.noaa_data_sorted["Funding_Agency"] = _l
except Exception:
pass
return |
def virtualenv(
state, host, path,
python=None, site_packages=False, always_copy=False, present=True,
):
'''
Add/remove Python virtualenvs.
+ python: python interpreter to use
+ site_packages: give access to the global site-packages
+ always_copy: always copy files rather than symlinking
+ present: whether the virtualenv should exist
'''
if present is False and host.fact.directory(path):
yield files.directory(state, host, path, present=False)
elif present and not host.fact.directory(path):
# Create missing virtualenv
command = ['virtualenv']
if python:
command.append('-p {0}'.format(python))
if site_packages:
command.append('--system-site-packages')
if always_copy:
command.append('--always-copy')
command.append(path)
yield ' '.join(command) | Add/remove Python virtualenvs.
+ python: python interpreter to use
+ site_packages: give access to the global site-packages
+ always_copy: always copy files rather than symlinking
+ present: whether the virtualenv should exist | Below is the the instruction that describes the task:
### Input:
Add/remove Python virtualenvs.
+ python: python interpreter to use
+ site_packages: give access to the global site-packages
+ always_copy: always copy files rather than symlinking
+ present: whether the virtualenv should exist
### Response:
def virtualenv(
state, host, path,
python=None, site_packages=False, always_copy=False, present=True,
):
'''
Add/remove Python virtualenvs.
+ python: python interpreter to use
+ site_packages: give access to the global site-packages
+ always_copy: always copy files rather than symlinking
+ present: whether the virtualenv should exist
'''
if present is False and host.fact.directory(path):
yield files.directory(state, host, path, present=False)
elif present and not host.fact.directory(path):
# Create missing virtualenv
command = ['virtualenv']
if python:
command.append('-p {0}'.format(python))
if site_packages:
command.append('--system-site-packages')
if always_copy:
command.append('--always-copy')
command.append(path)
yield ' '.join(command) |
async def delay(source, delay):
"""Delay the iteration of an asynchronous sequence."""
await asyncio.sleep(delay)
async with streamcontext(source) as streamer:
async for item in streamer:
yield item | Delay the iteration of an asynchronous sequence. | Below is the the instruction that describes the task:
### Input:
Delay the iteration of an asynchronous sequence.
### Response:
async def delay(source, delay):
"""Delay the iteration of an asynchronous sequence."""
await asyncio.sleep(delay)
async with streamcontext(source) as streamer:
async for item in streamer:
yield item |
def MakePmfFromCdf(cdf, name=None):
"""Makes a normalized Pmf from a Cdf object.
Args:
cdf: Cdf object
name: string name for the new Pmf
Returns:
Pmf object
"""
if name is None:
name = cdf.name
pmf = Pmf(name=name)
prev = 0.0
for val, prob in cdf.Items():
pmf.Incr(val, prob - prev)
prev = prob
return pmf | Makes a normalized Pmf from a Cdf object.
Args:
cdf: Cdf object
name: string name for the new Pmf
Returns:
Pmf object | Below is the the instruction that describes the task:
### Input:
Makes a normalized Pmf from a Cdf object.
Args:
cdf: Cdf object
name: string name for the new Pmf
Returns:
Pmf object
### Response:
def MakePmfFromCdf(cdf, name=None):
"""Makes a normalized Pmf from a Cdf object.
Args:
cdf: Cdf object
name: string name for the new Pmf
Returns:
Pmf object
"""
if name is None:
name = cdf.name
pmf = Pmf(name=name)
prev = 0.0
for val, prob in cdf.Items():
pmf.Incr(val, prob - prev)
prev = prob
return pmf |
def send_text(self, text, **options):
"""
Send a text message to the chat.
:param str text: Text of the message to send
:param options: Additional sendMessage options (see
https://core.telegram.org/bots/api#sendmessage
"""
return self.bot.send_message(self.id, text, **options) | Send a text message to the chat.
:param str text: Text of the message to send
:param options: Additional sendMessage options (see
https://core.telegram.org/bots/api#sendmessage | Below is the the instruction that describes the task:
### Input:
Send a text message to the chat.
:param str text: Text of the message to send
:param options: Additional sendMessage options (see
https://core.telegram.org/bots/api#sendmessage
### Response:
def send_text(self, text, **options):
"""
Send a text message to the chat.
:param str text: Text of the message to send
:param options: Additional sendMessage options (see
https://core.telegram.org/bots/api#sendmessage
"""
return self.bot.send_message(self.id, text, **options) |
def cancel_transfer_operation(self, operation_name):
"""
Cancels an transfer operation in Google Storage Transfer Service.
:param operation_name: Name of the transfer operation.
:type operation_name: str
:rtype: None
"""
self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries) | Cancels an transfer operation in Google Storage Transfer Service.
:param operation_name: Name of the transfer operation.
:type operation_name: str
:rtype: None | Below is the the instruction that describes the task:
### Input:
Cancels an transfer operation in Google Storage Transfer Service.
:param operation_name: Name of the transfer operation.
:type operation_name: str
:rtype: None
### Response:
def cancel_transfer_operation(self, operation_name):
"""
Cancels an transfer operation in Google Storage Transfer Service.
:param operation_name: Name of the transfer operation.
:type operation_name: str
:rtype: None
"""
self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries) |
def log(self, *args):
'''
get/set the internal pipeline log message object.
Caller can further manipulate the log object with object-specific
calls.
'''
if len(args):
self._log = args[0]
else:
return self._log | get/set the internal pipeline log message object.
Caller can further manipulate the log object with object-specific
calls. | Below is the the instruction that describes the task:
### Input:
get/set the internal pipeline log message object.
Caller can further manipulate the log object with object-specific
calls.
### Response:
def log(self, *args):
'''
get/set the internal pipeline log message object.
Caller can further manipulate the log object with object-specific
calls.
'''
if len(args):
self._log = args[0]
else:
return self._log |
def readSettings(settings_path=None):
global _settings
'''
Reads the settings corresponding to the plugin from where the method is called.
This function has to be called in the __init__ method of the plugin class.
Settings are stored in a settings.json file in the plugin folder.
Here is an eample of such a file:
[
{"name":"mysetting",
"label": "My setting",
"description": "A setting to customize my plugin",
"type": "string",
"default": "dummy string",
"group": "Group 1"
"onEdit": "def f():\\n\\tprint "Value edited in settings dialog"
"onChange": "def f():\\n\\tprint "New settings value has been saved"
},
{"name":"anothersetting",
"label": "Another setting",
"description": "Another setting to customize my plugin",
"type": "number",
"default": 0,
"group": "Group 2"
},
{"name":"achoicesetting",
"label": "A choice setting",
"description": "A setting to select from a set of possible options",
"type": "choice",
"default": "option 1",
"options":["option 1", "option 2", "option 3"],
"group": "Group 2"
}
]
Available types for settings are: string, bool, number, choice, crs and text (a multiline string)
The onEdit property contains a function that will be executed when the user edits the value
in the settings dialog. It shouldl return false if, after it has been executed, the setting
should not be modified and should recover its original value.
The onEdit property contains a function that will be executed when the setting is changed after
closing the settings dialog, or programatically by callin the setPluginSetting method
Both onEdit and onChange are optional properties
'''
namespace = _callerName().split(".")[0]
settings_path = settings_path or os.path.join(os.path.dirname(_callerPath()), "settings.json")
with open(settings_path) as f:
_settings[namespace] = json.load(f) | Reads the settings corresponding to the plugin from where the method is called.
This function has to be called in the __init__ method of the plugin class.
Settings are stored in a settings.json file in the plugin folder.
Here is an eample of such a file:
[
{"name":"mysetting",
"label": "My setting",
"description": "A setting to customize my plugin",
"type": "string",
"default": "dummy string",
"group": "Group 1"
"onEdit": "def f():\\n\\tprint "Value edited in settings dialog"
"onChange": "def f():\\n\\tprint "New settings value has been saved"
},
{"name":"anothersetting",
"label": "Another setting",
"description": "Another setting to customize my plugin",
"type": "number",
"default": 0,
"group": "Group 2"
},
{"name":"achoicesetting",
"label": "A choice setting",
"description": "A setting to select from a set of possible options",
"type": "choice",
"default": "option 1",
"options":["option 1", "option 2", "option 3"],
"group": "Group 2"
}
]
Available types for settings are: string, bool, number, choice, crs and text (a multiline string)
The onEdit property contains a function that will be executed when the user edits the value
in the settings dialog. It shouldl return false if, after it has been executed, the setting
should not be modified and should recover its original value.
The onEdit property contains a function that will be executed when the setting is changed after
closing the settings dialog, or programatically by callin the setPluginSetting method
Both onEdit and onChange are optional properties | Below is the the instruction that describes the task:
### Input:
Reads the settings corresponding to the plugin from where the method is called.
This function has to be called in the __init__ method of the plugin class.
Settings are stored in a settings.json file in the plugin folder.
Here is an eample of such a file:
[
{"name":"mysetting",
"label": "My setting",
"description": "A setting to customize my plugin",
"type": "string",
"default": "dummy string",
"group": "Group 1"
"onEdit": "def f():\\n\\tprint "Value edited in settings dialog"
"onChange": "def f():\\n\\tprint "New settings value has been saved"
},
{"name":"anothersetting",
"label": "Another setting",
"description": "Another setting to customize my plugin",
"type": "number",
"default": 0,
"group": "Group 2"
},
{"name":"achoicesetting",
"label": "A choice setting",
"description": "A setting to select from a set of possible options",
"type": "choice",
"default": "option 1",
"options":["option 1", "option 2", "option 3"],
"group": "Group 2"
}
]
Available types for settings are: string, bool, number, choice, crs and text (a multiline string)
The onEdit property contains a function that will be executed when the user edits the value
in the settings dialog. It shouldl return false if, after it has been executed, the setting
should not be modified and should recover its original value.
The onEdit property contains a function that will be executed when the setting is changed after
closing the settings dialog, or programatically by callin the setPluginSetting method
Both onEdit and onChange are optional properties
### Response:
def readSettings(settings_path=None):
global _settings
'''
Reads the settings corresponding to the plugin from where the method is called.
This function has to be called in the __init__ method of the plugin class.
Settings are stored in a settings.json file in the plugin folder.
Here is an eample of such a file:
[
{"name":"mysetting",
"label": "My setting",
"description": "A setting to customize my plugin",
"type": "string",
"default": "dummy string",
"group": "Group 1"
"onEdit": "def f():\\n\\tprint "Value edited in settings dialog"
"onChange": "def f():\\n\\tprint "New settings value has been saved"
},
{"name":"anothersetting",
"label": "Another setting",
"description": "Another setting to customize my plugin",
"type": "number",
"default": 0,
"group": "Group 2"
},
{"name":"achoicesetting",
"label": "A choice setting",
"description": "A setting to select from a set of possible options",
"type": "choice",
"default": "option 1",
"options":["option 1", "option 2", "option 3"],
"group": "Group 2"
}
]
Available types for settings are: string, bool, number, choice, crs and text (a multiline string)
The onEdit property contains a function that will be executed when the user edits the value
in the settings dialog. It shouldl return false if, after it has been executed, the setting
should not be modified and should recover its original value.
The onEdit property contains a function that will be executed when the setting is changed after
closing the settings dialog, or programatically by callin the setPluginSetting method
Both onEdit and onChange are optional properties
'''
namespace = _callerName().split(".")[0]
settings_path = settings_path or os.path.join(os.path.dirname(_callerPath()), "settings.json")
with open(settings_path) as f:
_settings[namespace] = json.load(f) |
def removeKeyButtonEvent(self, buttons= [] ):
"""!
\~english
Remove key button event callbacks
@param buttons: an array of button Ids. eg. [ 12,13,15, ...]
\~chinese
移除按键事件回调
@param buttons: 按钮ID数组。 例如: [12,13,15,...]
"""
for i in range( 0, len(buttons)-1 ):
GPIO.remove_event_detect( buttons[i] ) | !
\~english
Remove key button event callbacks
@param buttons: an array of button Ids. eg. [ 12,13,15, ...]
\~chinese
移除按键事件回调
@param buttons: 按钮ID数组。 例如: [12,13,15,...] | Below is the the instruction that describes the task:
### Input:
!
\~english
Remove key button event callbacks
@param buttons: an array of button Ids. eg. [ 12,13,15, ...]
\~chinese
移除按键事件回调
@param buttons: 按钮ID数组。 例如: [12,13,15,...]
### Response:
def removeKeyButtonEvent(self, buttons= [] ):
"""!
\~english
Remove key button event callbacks
@param buttons: an array of button Ids. eg. [ 12,13,15, ...]
\~chinese
移除按键事件回调
@param buttons: 按钮ID数组。 例如: [12,13,15,...]
"""
for i in range( 0, len(buttons)-1 ):
GPIO.remove_event_detect( buttons[i] ) |
def result(self, value):
"""The result of the command."""
if self._process_result:
self._result = self._process_result(value)
self._raw_result = value | The result of the command. | Below is the the instruction that describes the task:
### Input:
The result of the command.
### Response:
def result(self, value):
"""The result of the command."""
if self._process_result:
self._result = self._process_result(value)
self._raw_result = value |
def _key_parenleft(self, text):
"""Action for '('"""
self.hide_completion_widget()
if self.get_current_line_to_cursor():
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.insert_text(text)
self.show_object_info(last_obj, call=True)
return
self.insert_text(text) | Action for '( | Below is the the instruction that describes the task:
### Input:
Action for '(
### Response:
def _key_parenleft(self, text):
"""Action for '('"""
self.hide_completion_widget()
if self.get_current_line_to_cursor():
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.insert_text(text)
self.show_object_info(last_obj, call=True)
return
self.insert_text(text) |
def segmentation_image_simple1():
"Perfect"
parameters = legion_parameters();
parameters.eps = 0.02;
parameters.alpha = 0.005;
parameters.betta = 0.1;
parameters.gamma = 7.0;
parameters.teta = 0.9;
parameters.lamda = 0.1;
parameters.teta_x = -0.5;
parameters.teta_p = 7.0;
parameters.Wz = 0.7;
parameters.mu = 0.01;
parameters.fi = 3.0;
parameters.teta_xz = 0.1;
parameters.teta_zx = 0.1;
parameters.ENABLE_POTENTIONAL = False;
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE12, parameters, 2000, 2000, True); | Perfect | Below is the the instruction that describes the task:
### Input:
Perfect
### Response:
def segmentation_image_simple1():
"Perfect"
parameters = legion_parameters();
parameters.eps = 0.02;
parameters.alpha = 0.005;
parameters.betta = 0.1;
parameters.gamma = 7.0;
parameters.teta = 0.9;
parameters.lamda = 0.1;
parameters.teta_x = -0.5;
parameters.teta_p = 7.0;
parameters.Wz = 0.7;
parameters.mu = 0.01;
parameters.fi = 3.0;
parameters.teta_xz = 0.1;
parameters.teta_zx = 0.1;
parameters.ENABLE_POTENTIONAL = False;
template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE12, parameters, 2000, 2000, True); |
def next_frame_stochastic_discrete_range(rhp):
"""Next frame stochastic discrete tuning grid."""
rhp.set_float("learning_rate_constant", 0.001, 0.01)
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_int("filter_double_steps", 3, 5)
rhp.set_discrete("hidden_size", [64, 96, 128])
rhp.set_discrete("bottleneck_bits", [32, 64, 128, 256])
rhp.set_discrete("video_num_target_frames", [4])
rhp.set_float("bottleneck_noise", 0.0, 0.2) | Next frame stochastic discrete tuning grid. | Below is the the instruction that describes the task:
### Input:
Next frame stochastic discrete tuning grid.
### Response:
def next_frame_stochastic_discrete_range(rhp):
"""Next frame stochastic discrete tuning grid."""
rhp.set_float("learning_rate_constant", 0.001, 0.01)
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_int("filter_double_steps", 3, 5)
rhp.set_discrete("hidden_size", [64, 96, 128])
rhp.set_discrete("bottleneck_bits", [32, 64, 128, 256])
rhp.set_discrete("video_num_target_frames", [4])
rhp.set_float("bottleneck_noise", 0.0, 0.2) |
def write_daemon(self):
"""Write thread."""
while True:
(message, callback) = self._write_queue.get(block=True)
self.logger.info("Sending message on USB bus: %s", str(message))
self.logger.debug("Sending binary message: %s", str(message.to_binary()))
self._reader.write(message.to_binary())
time.sleep(self.SLEEP_TIME)
if callback:
callback() | Write thread. | Below is the the instruction that describes the task:
### Input:
Write thread.
### Response:
def write_daemon(self):
"""Write thread."""
while True:
(message, callback) = self._write_queue.get(block=True)
self.logger.info("Sending message on USB bus: %s", str(message))
self.logger.debug("Sending binary message: %s", str(message.to_binary()))
self._reader.write(message.to_binary())
time.sleep(self.SLEEP_TIME)
if callback:
callback() |
def _requiredSize(shape, dtype):
"""
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
"""
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize) | Determines the number of bytes required to store a NumPy array with
the specified shape and datatype. | Below is the the instruction that describes the task:
### Input:
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
### Response:
def _requiredSize(shape, dtype):
"""
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
"""
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize) |
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
# TODO: There must be a cleaner way to do this below
# Create VAR latent variables
for variable in range(self.ylen):
self.latent_variables.add_z(self.data_name[variable] + ' Constant', fam.Normal(0,3,transform=None), fam.Normal(0,3))
other_variables = np.delete(range(self.ylen), [variable])
for lag_no in range(self.lags):
self.latent_variables.add_z(str(self.data_name[variable]) + ' AR(' + str(lag_no+1) + ')', fam.Normal(0,0.5,transform=None), fam.Normal(0,3))
for other in other_variables:
self.latent_variables.add_z(str(self.data_name[other]) + ' to ' + str(self.data_name[variable]) + ' AR(' + str(lag_no+1) + ')', fam.Normal(0,0.5,transform=None), fam.Normal(0,3))
starting_params_temp = self._create_B_direct().flatten()
# Variance latent variables
for i in range(self.ylen):
for k in range(self.ylen):
if i == k:
self.latent_variables.add_z('Cholesky Diagonal ' + str(i), fam.Flat(transform='exp'), fam.Normal(0,3))
elif i > k:
self.latent_variables.add_z('Cholesky Off-Diagonal (' + str(i) + ',' + str(k) + ')', fam.Flat(transform=None), fam.Normal(0,3))
for i in range(0,self.ylen):
for k in range(0,self.ylen):
if i == k:
starting_params_temp = np.append(starting_params_temp,np.array([0.5]))
elif i > k:
starting_params_temp = np.append(starting_params_temp,np.array([0.0]))
self.latent_variables.set_z_starting_values(starting_params_temp) | Creates model latent variables
Returns
----------
None (changes model attributes) | Below is the the instruction that describes the task:
### Input:
Creates model latent variables
Returns
----------
None (changes model attributes)
### Response:
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
# TODO: There must be a cleaner way to do this below
# Create VAR latent variables
for variable in range(self.ylen):
self.latent_variables.add_z(self.data_name[variable] + ' Constant', fam.Normal(0,3,transform=None), fam.Normal(0,3))
other_variables = np.delete(range(self.ylen), [variable])
for lag_no in range(self.lags):
self.latent_variables.add_z(str(self.data_name[variable]) + ' AR(' + str(lag_no+1) + ')', fam.Normal(0,0.5,transform=None), fam.Normal(0,3))
for other in other_variables:
self.latent_variables.add_z(str(self.data_name[other]) + ' to ' + str(self.data_name[variable]) + ' AR(' + str(lag_no+1) + ')', fam.Normal(0,0.5,transform=None), fam.Normal(0,3))
starting_params_temp = self._create_B_direct().flatten()
# Variance latent variables
for i in range(self.ylen):
for k in range(self.ylen):
if i == k:
self.latent_variables.add_z('Cholesky Diagonal ' + str(i), fam.Flat(transform='exp'), fam.Normal(0,3))
elif i > k:
self.latent_variables.add_z('Cholesky Off-Diagonal (' + str(i) + ',' + str(k) + ')', fam.Flat(transform=None), fam.Normal(0,3))
for i in range(0,self.ylen):
for k in range(0,self.ylen):
if i == k:
starting_params_temp = np.append(starting_params_temp,np.array([0.5]))
elif i > k:
starting_params_temp = np.append(starting_params_temp,np.array([0.0]))
self.latent_variables.set_z_starting_values(starting_params_temp) |
def sanitize_command_options(options):
"""
Sanitizes command options.
"""
multiples = [
'badges',
'exclude_badges',
]
for option in multiples:
if options.get(option):
value = options[option]
if value:
options[option] = [v for v in value.split(' ') if v]
return options | Sanitizes command options. | Below is the the instruction that describes the task:
### Input:
Sanitizes command options.
### Response:
def sanitize_command_options(options):
"""
Sanitizes command options.
"""
multiples = [
'badges',
'exclude_badges',
]
for option in multiples:
if options.get(option):
value = options[option]
if value:
options[option] = [v for v in value.split(' ') if v]
return options |
def ConsumeIdentifierOrNumber(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER_OR_NUMBER.match(result):
raise self.ParseError('Expected identifier or number.')
self.NextToken()
return result | Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed. | Below is the the instruction that describes the task:
### Input:
Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
### Response:
def ConsumeIdentifierOrNumber(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER_OR_NUMBER.match(result):
raise self.ParseError('Expected identifier or number.')
self.NextToken()
return result |
def initialize_weights_nn(data, means, lognorm=True):
"""
Initializes the weights with a nearest-neighbor approach using the means.
"""
# TODO
genes, cells = data.shape
k = means.shape[1]
if lognorm:
data = log1p(cell_normalize(data))
for i in range(cells):
for j in range(k):
pass | Initializes the weights with a nearest-neighbor approach using the means. | Below is the the instruction that describes the task:
### Input:
Initializes the weights with a nearest-neighbor approach using the means.
### Response:
def initialize_weights_nn(data, means, lognorm=True):
"""
Initializes the weights with a nearest-neighbor approach using the means.
"""
# TODO
genes, cells = data.shape
k = means.shape[1]
if lognorm:
data = log1p(cell_normalize(data))
for i in range(cells):
for j in range(k):
pass |
def get_arsc_info(arscobj):
"""
Return a string containing all resources packages ordered by packagename, locale and type.
:param arscobj: :class:`~ARSCParser`
:return: a string
"""
buff = ""
for package in arscobj.get_packages_names():
buff += package + ":\n"
for locale in arscobj.get_locales(package):
buff += "\t" + repr(locale) + ":\n"
for ttype in arscobj.get_types(package, locale):
buff += "\t\t" + ttype + ":\n"
try:
tmp_buff = getattr(arscobj, "get_" + ttype + "_resources")(
package, locale).decode("utf-8", 'replace').split("\n")
for i in tmp_buff:
buff += "\t\t\t" + i + "\n"
except AttributeError:
pass
return buff | Return a string containing all resources packages ordered by packagename, locale and type.
:param arscobj: :class:`~ARSCParser`
:return: a string | Below is the the instruction that describes the task:
### Input:
Return a string containing all resources packages ordered by packagename, locale and type.
:param arscobj: :class:`~ARSCParser`
:return: a string
### Response:
def get_arsc_info(arscobj):
"""
Return a string containing all resources packages ordered by packagename, locale and type.
:param arscobj: :class:`~ARSCParser`
:return: a string
"""
buff = ""
for package in arscobj.get_packages_names():
buff += package + ":\n"
for locale in arscobj.get_locales(package):
buff += "\t" + repr(locale) + ":\n"
for ttype in arscobj.get_types(package, locale):
buff += "\t\t" + ttype + ":\n"
try:
tmp_buff = getattr(arscobj, "get_" + ttype + "_resources")(
package, locale).decode("utf-8", 'replace').split("\n")
for i in tmp_buff:
buff += "\t\t\t" + i + "\n"
except AttributeError:
pass
return buff |
def point_within_radius(self, points, pt, canvas_radius,
scales=(1.0, 1.0)):
"""Points `points` and point `pt` are in data coordinates.
Return True for points within the circle defined by
a center at point `pt` and within canvas_radius.
"""
scale_x, scale_y = scales
x, y = pt
a_arr, b_arr = np.asarray(points).T
dx = np.fabs(x - a_arr) * scale_x
dy = np.fabs(y - b_arr) * scale_y
new_radius = np.sqrt(dx**2 + dy**2)
res = (new_radius <= canvas_radius)
return res | Points `points` and point `pt` are in data coordinates.
Return True for points within the circle defined by
a center at point `pt` and within canvas_radius. | Below is the the instruction that describes the task:
### Input:
Points `points` and point `pt` are in data coordinates.
Return True for points within the circle defined by
a center at point `pt` and within canvas_radius.
### Response:
def point_within_radius(self, points, pt, canvas_radius,
scales=(1.0, 1.0)):
"""Points `points` and point `pt` are in data coordinates.
Return True for points within the circle defined by
a center at point `pt` and within canvas_radius.
"""
scale_x, scale_y = scales
x, y = pt
a_arr, b_arr = np.asarray(points).T
dx = np.fabs(x - a_arr) * scale_x
dy = np.fabs(y - b_arr) * scale_y
new_radius = np.sqrt(dx**2 + dy**2)
res = (new_radius <= canvas_radius)
return res |
def van_arkel_triangle(list_of_materials, annotate=True):
"""
A static method that generates a binary van Arkel-Ketelaar triangle to
quantify the ionic, metallic and covalent character of a compound
by plotting the electronegativity difference (y) vs average (x).
See:
A.E. van Arkel, Molecules and Crystals in Inorganic Chemistry,
Interscience, New York (1956)
and
J.A.A Ketelaar, Chemical Constitution (2nd edn.), An Introduction
to the Theory of the Chemical Bond, Elsevier, New York (1958)
Args:
list_of_materials (list): A list of computed entries of binary
materials or a list of lists containing two elements (str).
annotate (bool): Whether or not to lable the points on the
triangle with reduced formula (if list of entries) or pair
of elements (if list of list of str).
"""
# F-Fr has the largest X difference. We set this
# as our top corner of the triangle (most ionic)
pt1 = np.array([(Element("F").X + Element("Fr").X) / 2,
abs(Element("F").X - Element("Fr").X)])
# Cs-Fr has the lowest average X. We set this as our
# bottom left corner of the triangle (most metallic)
pt2 = np.array([(Element("Cs").X + Element("Fr").X) / 2,
abs(Element("Cs").X - Element("Fr").X)])
# O-F has the highest average X. We set this as our
# bottom right corner of the triangle (most covalent)
pt3 = np.array([(Element("O").X + Element("F").X) / 2,
abs(Element("O").X - Element("F").X)])
# get the parameters for the lines of the triangle
d = np.array(pt1) - np.array(pt2)
slope1 = d[1] / d[0]
b1 = pt1[1] - slope1 * pt1[0]
d = pt3 - pt1
slope2 = d[1] / d[0]
b2 = pt3[1] - slope2 * pt3[0]
# Initialize the plt object
import matplotlib.pyplot as plt
# set labels and appropriate limits for plot
plt.xlim(pt2[0] - 0.45, -b2 / slope2 + 0.45)
plt.ylim(-0.45, pt1[1] + 0.45)
plt.annotate("Ionic", xy=[pt1[0] - 0.3, pt1[1] + 0.05], fontsize=20)
plt.annotate("Covalent", xy=[-b2 / slope2 - 0.65, -0.4], fontsize=20)
plt.annotate("Metallic", xy=[pt2[0] - 0.4, -0.4], fontsize=20)
plt.xlabel(r"$\frac{\chi_{A}+\chi_{B}}{2}$", fontsize=25)
plt.ylabel(r"$|\chi_{A}-\chi_{B}|$", fontsize=25)
# Set the lines of the triangle
chi_list = [el.X for el in Element]
plt.plot([min(chi_list), pt1[0]], [slope1 * min(chi_list) + b1, pt1[1]], 'k-', linewidth=3)
plt.plot([pt1[0], -b2 / slope2], [pt1[1], 0], 'k-', linewidth=3)
plt.plot([min(chi_list), -b2 / slope2], [0, 0], 'k-', linewidth=3)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
# Shade with appropriate colors corresponding to ionic, metallci and covalent
ax = plt.gca()
# ionic filling
ax.fill_between([min(chi_list), pt1[0]],
[slope1 * min(chi_list) + b1, pt1[1]], facecolor=[1, 1, 0],
zorder=-5, edgecolor=[1, 1, 0])
ax.fill_between([pt1[0], -b2 / slope2],
[pt1[1], slope2 * min(chi_list) - b1], facecolor=[1, 1, 0],
zorder=-5, edgecolor=[1, 1, 0])
# metal filling
XPt = Element("Pt").X
ax.fill_between([min(chi_list), (XPt + min(chi_list)) / 2],
[0, slope1 * (XPt + min(chi_list)) / 2 + b1],
facecolor=[1, 0, 0], zorder=-3, alpha=0.8)
ax.fill_between([(XPt + min(chi_list)) / 2, XPt],
[slope1 * ((XPt + min(chi_list)) / 2) + b1, 0],
facecolor=[1, 0, 0], zorder=-3, alpha=0.8)
# covalent filling
ax.fill_between([(XPt + min(chi_list)) / 2, ((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2],
[0, slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2],
facecolor=[0, 1, 0], zorder=-4, alpha=0.8)
ax.fill_between([((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2, -b2 / slope2],
[slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2, 0],
facecolor=[0, 1, 0], zorder=-4, alpha=0.8)
# Label the triangle with datapoints
for entry in list_of_materials:
if type(entry).__name__ not in ['ComputedEntry', 'ComputedStructureEntry']:
X_pair = [Element(el).X for el in entry]
formatted_formula = "%s-%s" % tuple(entry)
else:
X_pair = [Element(el).X for el in entry.composition.as_dict().keys()]
formatted_formula = format_formula(entry.composition.reduced_formula)
plt.scatter(np.mean(X_pair), abs(X_pair[0] - X_pair[1]), c='b', s=100)
if annotate:
plt.annotate(formatted_formula, fontsize=15,
xy=[np.mean(X_pair) + 0.005, abs(X_pair[0] - X_pair[1])])
plt.tight_layout()
return plt | A static method that generates a binary van Arkel-Ketelaar triangle to
quantify the ionic, metallic and covalent character of a compound
by plotting the electronegativity difference (y) vs average (x).
See:
A.E. van Arkel, Molecules and Crystals in Inorganic Chemistry,
Interscience, New York (1956)
and
J.A.A Ketelaar, Chemical Constitution (2nd edn.), An Introduction
to the Theory of the Chemical Bond, Elsevier, New York (1958)
Args:
list_of_materials (list): A list of computed entries of binary
materials or a list of lists containing two elements (str).
annotate (bool): Whether or not to lable the points on the
triangle with reduced formula (if list of entries) or pair
of elements (if list of list of str). | Below is the the instruction that describes the task:
### Input:
A static method that generates a binary van Arkel-Ketelaar triangle to
quantify the ionic, metallic and covalent character of a compound
by plotting the electronegativity difference (y) vs average (x).
See:
A.E. van Arkel, Molecules and Crystals in Inorganic Chemistry,
Interscience, New York (1956)
and
J.A.A Ketelaar, Chemical Constitution (2nd edn.), An Introduction
to the Theory of the Chemical Bond, Elsevier, New York (1958)
Args:
list_of_materials (list): A list of computed entries of binary
materials or a list of lists containing two elements (str).
annotate (bool): Whether or not to lable the points on the
triangle with reduced formula (if list of entries) or pair
of elements (if list of list of str).
### Response:
def van_arkel_triangle(list_of_materials, annotate=True):
"""
A static method that generates a binary van Arkel-Ketelaar triangle to
quantify the ionic, metallic and covalent character of a compound
by plotting the electronegativity difference (y) vs average (x).
See:
A.E. van Arkel, Molecules and Crystals in Inorganic Chemistry,
Interscience, New York (1956)
and
J.A.A Ketelaar, Chemical Constitution (2nd edn.), An Introduction
to the Theory of the Chemical Bond, Elsevier, New York (1958)
Args:
list_of_materials (list): A list of computed entries of binary
materials or a list of lists containing two elements (str).
annotate (bool): Whether or not to lable the points on the
triangle with reduced formula (if list of entries) or pair
of elements (if list of list of str).
"""
# F-Fr has the largest X difference. We set this
# as our top corner of the triangle (most ionic)
pt1 = np.array([(Element("F").X + Element("Fr").X) / 2,
abs(Element("F").X - Element("Fr").X)])
# Cs-Fr has the lowest average X. We set this as our
# bottom left corner of the triangle (most metallic)
pt2 = np.array([(Element("Cs").X + Element("Fr").X) / 2,
abs(Element("Cs").X - Element("Fr").X)])
# O-F has the highest average X. We set this as our
# bottom right corner of the triangle (most covalent)
pt3 = np.array([(Element("O").X + Element("F").X) / 2,
abs(Element("O").X - Element("F").X)])
# get the parameters for the lines of the triangle
d = np.array(pt1) - np.array(pt2)
slope1 = d[1] / d[0]
b1 = pt1[1] - slope1 * pt1[0]
d = pt3 - pt1
slope2 = d[1] / d[0]
b2 = pt3[1] - slope2 * pt3[0]
# Initialize the plt object
import matplotlib.pyplot as plt
# set labels and appropriate limits for plot
plt.xlim(pt2[0] - 0.45, -b2 / slope2 + 0.45)
plt.ylim(-0.45, pt1[1] + 0.45)
plt.annotate("Ionic", xy=[pt1[0] - 0.3, pt1[1] + 0.05], fontsize=20)
plt.annotate("Covalent", xy=[-b2 / slope2 - 0.65, -0.4], fontsize=20)
plt.annotate("Metallic", xy=[pt2[0] - 0.4, -0.4], fontsize=20)
plt.xlabel(r"$\frac{\chi_{A}+\chi_{B}}{2}$", fontsize=25)
plt.ylabel(r"$|\chi_{A}-\chi_{B}|$", fontsize=25)
# Set the lines of the triangle
chi_list = [el.X for el in Element]
plt.plot([min(chi_list), pt1[0]], [slope1 * min(chi_list) + b1, pt1[1]], 'k-', linewidth=3)
plt.plot([pt1[0], -b2 / slope2], [pt1[1], 0], 'k-', linewidth=3)
plt.plot([min(chi_list), -b2 / slope2], [0, 0], 'k-', linewidth=3)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
# Shade with appropriate colors corresponding to ionic, metallci and covalent
ax = plt.gca()
# ionic filling
ax.fill_between([min(chi_list), pt1[0]],
[slope1 * min(chi_list) + b1, pt1[1]], facecolor=[1, 1, 0],
zorder=-5, edgecolor=[1, 1, 0])
ax.fill_between([pt1[0], -b2 / slope2],
[pt1[1], slope2 * min(chi_list) - b1], facecolor=[1, 1, 0],
zorder=-5, edgecolor=[1, 1, 0])
# metal filling
XPt = Element("Pt").X
ax.fill_between([min(chi_list), (XPt + min(chi_list)) / 2],
[0, slope1 * (XPt + min(chi_list)) / 2 + b1],
facecolor=[1, 0, 0], zorder=-3, alpha=0.8)
ax.fill_between([(XPt + min(chi_list)) / 2, XPt],
[slope1 * ((XPt + min(chi_list)) / 2) + b1, 0],
facecolor=[1, 0, 0], zorder=-3, alpha=0.8)
# covalent filling
ax.fill_between([(XPt + min(chi_list)) / 2, ((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2],
[0, slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2],
facecolor=[0, 1, 0], zorder=-4, alpha=0.8)
ax.fill_between([((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2, -b2 / slope2],
[slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2, 0],
facecolor=[0, 1, 0], zorder=-4, alpha=0.8)
# Label the triangle with datapoints
for entry in list_of_materials:
if type(entry).__name__ not in ['ComputedEntry', 'ComputedStructureEntry']:
X_pair = [Element(el).X for el in entry]
formatted_formula = "%s-%s" % tuple(entry)
else:
X_pair = [Element(el).X for el in entry.composition.as_dict().keys()]
formatted_formula = format_formula(entry.composition.reduced_formula)
plt.scatter(np.mean(X_pair), abs(X_pair[0] - X_pair[1]), c='b', s=100)
if annotate:
plt.annotate(formatted_formula, fontsize=15,
xy=[np.mean(X_pair) + 0.005, abs(X_pair[0] - X_pair[1])])
plt.tight_layout()
return plt |
def CALL(cpu, op0):
"""
Procedure call.
Saves procedure linking information on the stack and branches to the called procedure specified using the target
operand. The target operand specifies the address of the first instruction in the called procedure. The operand can
be an immediate value, a general-purpose register, or a memory location.
:param cpu: current CPU.
:param op0: target operand.
"""
# TODO FIX 64Bit FIX segment
proc = op0.read()
cpu.push(cpu.PC, cpu.address_bit_size)
cpu.PC = proc | Procedure call.
Saves procedure linking information on the stack and branches to the called procedure specified using the target
operand. The target operand specifies the address of the first instruction in the called procedure. The operand can
be an immediate value, a general-purpose register, or a memory location.
:param cpu: current CPU.
:param op0: target operand. | Below is the the instruction that describes the task:
### Input:
Procedure call.
Saves procedure linking information on the stack and branches to the called procedure specified using the target
operand. The target operand specifies the address of the first instruction in the called procedure. The operand can
be an immediate value, a general-purpose register, or a memory location.
:param cpu: current CPU.
:param op0: target operand.
### Response:
def CALL(cpu, op0):
"""
Procedure call.
Saves procedure linking information on the stack and branches to the called procedure specified using the target
operand. The target operand specifies the address of the first instruction in the called procedure. The operand can
be an immediate value, a general-purpose register, or a memory location.
:param cpu: current CPU.
:param op0: target operand.
"""
# TODO FIX 64Bit FIX segment
proc = op0.read()
cpu.push(cpu.PC, cpu.address_bit_size)
cpu.PC = proc |
def print_(rows, limit=15, sort='size', order='descending'):
"""Print the rows as a summary.
Keyword arguments:
limit -- the maximum number of elements to be listed
sort -- sort elements by 'size', 'type', or '#'
order -- sort 'ascending' or 'descending'
"""
localrows = []
for row in rows:
localrows.append(list(row))
# input validation
sortby = ['type', '#', 'size']
if sort not in sortby:
raise ValueError("invalid sort, should be one of" + str(sortby))
orders = ['ascending', 'descending']
if order not in orders:
raise ValueError("invalid order, should be one of" + str(orders))
# sort rows
if sortby.index(sort) == 0:
if order == "ascending":
localrows.sort(key=lambda x: _repr(x[0]))
elif order == "descending":
localrows.sort(key=lambda x: _repr(x[0]), reverse=True)
else:
if order == "ascending":
localrows.sort(key=lambda x: x[sortby.index(sort)])
elif order == "descending":
localrows.sort(key=lambda x: x[sortby.index(sort)], reverse=True)
# limit rows
localrows = localrows[0:limit]
for row in localrows:
row[2] = stringutils.pp(row[2])
# print rows
localrows.insert(0,["types", "# objects", "total size"])
_print_table(localrows) | Print the rows as a summary.
Keyword arguments:
limit -- the maximum number of elements to be listed
sort -- sort elements by 'size', 'type', or '#'
order -- sort 'ascending' or 'descending' | Below is the the instruction that describes the task:
### Input:
Print the rows as a summary.
Keyword arguments:
limit -- the maximum number of elements to be listed
sort -- sort elements by 'size', 'type', or '#'
order -- sort 'ascending' or 'descending'
### Response:
def print_(rows, limit=15, sort='size', order='descending'):
"""Print the rows as a summary.
Keyword arguments:
limit -- the maximum number of elements to be listed
sort -- sort elements by 'size', 'type', or '#'
order -- sort 'ascending' or 'descending'
"""
localrows = []
for row in rows:
localrows.append(list(row))
# input validation
sortby = ['type', '#', 'size']
if sort not in sortby:
raise ValueError("invalid sort, should be one of" + str(sortby))
orders = ['ascending', 'descending']
if order not in orders:
raise ValueError("invalid order, should be one of" + str(orders))
# sort rows
if sortby.index(sort) == 0:
if order == "ascending":
localrows.sort(key=lambda x: _repr(x[0]))
elif order == "descending":
localrows.sort(key=lambda x: _repr(x[0]), reverse=True)
else:
if order == "ascending":
localrows.sort(key=lambda x: x[sortby.index(sort)])
elif order == "descending":
localrows.sort(key=lambda x: x[sortby.index(sort)], reverse=True)
# limit rows
localrows = localrows[0:limit]
for row in localrows:
row[2] = stringutils.pp(row[2])
# print rows
localrows.insert(0,["types", "# objects", "total size"])
_print_table(localrows) |
def src_vocab(self):
"""Source Vocabulary of the Dataset.
Returns
-------
src_vocab : Vocab
Source vocabulary.
"""
if self._src_vocab is None:
src_vocab_file_name, src_vocab_hash = \
self._data_file[self._pair_key]['vocab' + '_' + self._src_lang]
[src_vocab_path] = self._fetch_data_path([(src_vocab_file_name, src_vocab_hash)])
with io.open(src_vocab_path, 'r', encoding='utf-8') as in_file:
self._src_vocab = Vocab.from_json(in_file.read())
return self._src_vocab | Source Vocabulary of the Dataset.
Returns
-------
src_vocab : Vocab
Source vocabulary. | Below is the the instruction that describes the task:
### Input:
Source Vocabulary of the Dataset.
Returns
-------
src_vocab : Vocab
Source vocabulary.
### Response:
def src_vocab(self):
"""Source Vocabulary of the Dataset.
Returns
-------
src_vocab : Vocab
Source vocabulary.
"""
if self._src_vocab is None:
src_vocab_file_name, src_vocab_hash = \
self._data_file[self._pair_key]['vocab' + '_' + self._src_lang]
[src_vocab_path] = self._fetch_data_path([(src_vocab_file_name, src_vocab_hash)])
with io.open(src_vocab_path, 'r', encoding='utf-8') as in_file:
self._src_vocab = Vocab.from_json(in_file.read())
return self._src_vocab |
def _load_token_cache(self):
'Reads the local fs cache for pre-authorized access tokens'
try:
logging.debug('About to read from local file cache file %s',
self.token_cache_file)
with open(self.token_cache_file, 'rb') as f:
fs_cached = cPickle.load(f)
if self._check_token_cache_type(fs_cached):
logging.debug('Loaded from file system: %s', fs_cached)
return fs_cached
else:
logging.warn('Found unexpected value in cache. %s',
fs_cached)
return None
except IOError:
logging.debug(
'Did not find file: %s on the file system.',
self.token_cache_file)
return None
except:
logging.info(
'Encountered exception loading from the file system.',
exc_info=True)
return None | Reads the local fs cache for pre-authorized access tokens | Below is the the instruction that describes the task:
### Input:
Reads the local fs cache for pre-authorized access tokens
### Response:
def _load_token_cache(self):
'Reads the local fs cache for pre-authorized access tokens'
try:
logging.debug('About to read from local file cache file %s',
self.token_cache_file)
with open(self.token_cache_file, 'rb') as f:
fs_cached = cPickle.load(f)
if self._check_token_cache_type(fs_cached):
logging.debug('Loaded from file system: %s', fs_cached)
return fs_cached
else:
logging.warn('Found unexpected value in cache. %s',
fs_cached)
return None
except IOError:
logging.debug(
'Did not find file: %s on the file system.',
self.token_cache_file)
return None
except:
logging.info(
'Encountered exception loading from the file system.',
exc_info=True)
return None |
def put(self, url, data=None):
"""Send a HTTP PUT request to a URL and return the result.
"""
self.conn.request("PUT", url, data)
return self._process_response() | Send a HTTP PUT request to a URL and return the result. | Below is the the instruction that describes the task:
### Input:
Send a HTTP PUT request to a URL and return the result.
### Response:
def put(self, url, data=None):
"""Send a HTTP PUT request to a URL and return the result.
"""
self.conn.request("PUT", url, data)
return self._process_response() |
def _process_err(self, err_msg):
"""
Processes the raw error message sent by the server
and close connection with current server.
"""
if STALE_CONNECTION in err_msg:
yield from self._process_op_err(ErrStaleConnection)
return
if AUTHORIZATION_VIOLATION in err_msg:
self._err = ErrAuthorization
else:
m = b'nats: ' + err_msg[0]
self._err = NatsError(m.decode())
do_cbs = False
if not self.is_connecting:
do_cbs = True
# FIXME: Some errors such as 'Invalid Subscription'
# do not cause the server to close the connection.
# For now we handle similar as other clients and close.
self._loop.create_task(self._close(Client.CLOSED, do_cbs)) | Processes the raw error message sent by the server
and close connection with current server. | Below is the the instruction that describes the task:
### Input:
Processes the raw error message sent by the server
and close connection with current server.
### Response:
def _process_err(self, err_msg):
"""
Processes the raw error message sent by the server
and close connection with current server.
"""
if STALE_CONNECTION in err_msg:
yield from self._process_op_err(ErrStaleConnection)
return
if AUTHORIZATION_VIOLATION in err_msg:
self._err = ErrAuthorization
else:
m = b'nats: ' + err_msg[0]
self._err = NatsError(m.decode())
do_cbs = False
if not self.is_connecting:
do_cbs = True
# FIXME: Some errors such as 'Invalid Subscription'
# do not cause the server to close the connection.
# For now we handle similar as other clients and close.
self._loop.create_task(self._close(Client.CLOSED, do_cbs)) |
def parse_midi(self, filename, **kwargs):
"""
Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation.
"""
pm = pretty_midi.PrettyMIDI(filename)
self.parse_pretty_midi(pm, **kwargs) | Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation. | Below is the the instruction that describes the task:
### Input:
Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation.
### Response:
def parse_midi(self, filename, **kwargs):
"""
Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation.
"""
pm = pretty_midi.PrettyMIDI(filename)
self.parse_pretty_midi(pm, **kwargs) |
def get_solc_input(self):
"""Walks the contract directory and returns a Solidity input dict
Learn more about Solidity input JSON here: https://goo.gl/7zKBvj
Returns:
dict: A Solidity input JSON object as a dict
"""
def legal(r, file_name):
hidden = file_name[0] == '.'
dotsol = len(file_name) > 3 and file_name[-4:] == '.sol'
path = os.path.normpath(os.path.join(r, file_name))
notfile = not os.path.isfile(path)
symlink = Path(path).is_symlink()
return dotsol and (not (symlink or hidden or notfile))
solc_input = {
'language': 'Solidity',
'sources': {
file_name: {
'urls': [os.path.realpath(os.path.join(r, file_name))]
} for r, d, f in os.walk(self.contracts_dir) for file_name in f if legal(r, file_name)
},
'settings': {
'optimizer': {
'enabled': 1,
'runs': 10000
},
'outputSelection': {
"*": {
"": [
"legacyAST",
"ast"
],
"*": [
"abi",
"evm.bytecode.object",
"evm.bytecode.sourceMap",
"evm.deployedBytecode.object",
"evm.deployedBytecode.sourceMap"
]
}
}
}
}
return solc_input | Walks the contract directory and returns a Solidity input dict
Learn more about Solidity input JSON here: https://goo.gl/7zKBvj
Returns:
dict: A Solidity input JSON object as a dict | Below is the the instruction that describes the task:
### Input:
Walks the contract directory and returns a Solidity input dict
Learn more about Solidity input JSON here: https://goo.gl/7zKBvj
Returns:
dict: A Solidity input JSON object as a dict
### Response:
def get_solc_input(self):
"""Walks the contract directory and returns a Solidity input dict
Learn more about Solidity input JSON here: https://goo.gl/7zKBvj
Returns:
dict: A Solidity input JSON object as a dict
"""
def legal(r, file_name):
hidden = file_name[0] == '.'
dotsol = len(file_name) > 3 and file_name[-4:] == '.sol'
path = os.path.normpath(os.path.join(r, file_name))
notfile = not os.path.isfile(path)
symlink = Path(path).is_symlink()
return dotsol and (not (symlink or hidden or notfile))
solc_input = {
'language': 'Solidity',
'sources': {
file_name: {
'urls': [os.path.realpath(os.path.join(r, file_name))]
} for r, d, f in os.walk(self.contracts_dir) for file_name in f if legal(r, file_name)
},
'settings': {
'optimizer': {
'enabled': 1,
'runs': 10000
},
'outputSelection': {
"*": {
"": [
"legacyAST",
"ast"
],
"*": [
"abi",
"evm.bytecode.object",
"evm.bytecode.sourceMap",
"evm.deployedBytecode.object",
"evm.deployedBytecode.sourceMap"
]
}
}
}
}
return solc_input |
def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
"""Upgrade to a later version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
command.upgrade(config, revision, sql=sql, tag=tag) | Upgrade to a later version | Below is the the instruction that describes the task:
### Input:
Upgrade to a later version
### Response:
def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
"""Upgrade to a later version"""
config = current_app.extensions['migrate'].migrate.get_config(directory,
x_arg=x_arg)
command.upgrade(config, revision, sql=sql, tag=tag) |
def set_model(self, mdl):
"""
Setup the image model formation equation and corresponding objects into
their various objects. `mdl` is a `peri.models.Model` object
"""
self.mdl = mdl
self.mdl.check_inputs(self.comps)
for c in self.comps:
setattr(self, '_comp_'+c.category, c) | Setup the image model formation equation and corresponding objects into
their various objects. `mdl` is a `peri.models.Model` object | Below is the the instruction that describes the task:
### Input:
Setup the image model formation equation and corresponding objects into
their various objects. `mdl` is a `peri.models.Model` object
### Response:
def set_model(self, mdl):
"""
Setup the image model formation equation and corresponding objects into
their various objects. `mdl` is a `peri.models.Model` object
"""
self.mdl = mdl
self.mdl.check_inputs(self.comps)
for c in self.comps:
setattr(self, '_comp_'+c.category, c) |
def maxlevel(lst):
"""Return maximum nesting depth"""
maxlev = 0
def f(lst, level):
nonlocal maxlev
if isinstance(lst, list):
level += 1
maxlev = max(level, maxlev)
for item in lst:
f(item, level)
f(lst, 0)
return maxlev | Return maximum nesting depth | Below is the the instruction that describes the task:
### Input:
Return maximum nesting depth
### Response:
def maxlevel(lst):
"""Return maximum nesting depth"""
maxlev = 0
def f(lst, level):
nonlocal maxlev
if isinstance(lst, list):
level += 1
maxlev = max(level, maxlev)
for item in lst:
f(item, level)
f(lst, 0)
return maxlev |
def add_info_widget(self, widget):
''' add right panel widget '''
if not self.screen:
self.log.debug("No screen instance to add widget")
else:
self.screen.add_info_widget(widget) | add right panel widget | Below is the the instruction that describes the task:
### Input:
add right panel widget
### Response:
def add_info_widget(self, widget):
''' add right panel widget '''
if not self.screen:
self.log.debug("No screen instance to add widget")
else:
self.screen.add_info_widget(widget) |
def save_to_disk(self, filename_pattern=None):
"""Returns a callback to convert test record to proto and save to disk."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'save_to_disk.')
pattern = filename_pattern or self._default_filename_pattern
if not pattern:
raise RuntimeError(
'Must specify provide a filename_pattern or set a '
'_default_filename_pattern on subclass.')
def save_to_disk_callback(test_record_obj):
proto = self._convert(test_record_obj)
output_to_file = callbacks.OutputToFile(pattern)
with output_to_file.open_output_file(test_record_obj) as outfile:
outfile.write(proto.SerializeToString())
return save_to_disk_callback | Returns a callback to convert test record to proto and save to disk. | Below is the the instruction that describes the task:
### Input:
Returns a callback to convert test record to proto and save to disk.
### Response:
def save_to_disk(self, filename_pattern=None):
"""Returns a callback to convert test record to proto and save to disk."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'save_to_disk.')
pattern = filename_pattern or self._default_filename_pattern
if not pattern:
raise RuntimeError(
'Must specify provide a filename_pattern or set a '
'_default_filename_pattern on subclass.')
def save_to_disk_callback(test_record_obj):
proto = self._convert(test_record_obj)
output_to_file = callbacks.OutputToFile(pattern)
with output_to_file.open_output_file(test_record_obj) as outfile:
outfile.write(proto.SerializeToString())
return save_to_disk_callback |
def start(self):
"""
Starts this QEMU VM.
"""
with (yield from self._execute_lock):
if self.is_running():
# resume the VM if it is paused
yield from self.resume()
return
if self._manager.config.get_section_config("Qemu").getboolean("monitor", True):
try:
info = socket.getaddrinfo(self._monitor_host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
if not info:
raise QemuError("getaddrinfo returns an empty list on {}".format(self._monitor_host))
for res in info:
af, socktype, proto, _, sa = res
# let the OS find an unused port for the Qemu monitor
with socket.socket(af, socktype, proto) as sock:
sock.bind(sa)
self._monitor = sock.getsockname()[1]
except OSError as e:
raise QemuError("Could not find free port for the Qemu monitor: {}".format(e))
# check if there is enough RAM to run
self.check_available_ram(self.ram)
command = yield from self._build_command()
command_string = " ".join(shlex.quote(s) for s in command)
try:
log.info("Starting QEMU with: {}".format(command_string))
self._stdout_file = os.path.join(self.working_dir, "qemu.log")
log.info("logging to {}".format(self._stdout_file))
with open(self._stdout_file, "w", encoding="utf-8") as fd:
fd.write("Start QEMU with {}\n\nExecution log:\n".format(command_string))
self.command_line = ' '.join(command)
self._process = yield from asyncio.create_subprocess_exec(*command,
stdout=fd,
stderr=subprocess.STDOUT,
cwd=self.working_dir)
yield from self._start_ubridge()
for adapter_number, adapter in enumerate(self._ethernet_adapters):
nio = adapter.get_nio(0)
if nio:
yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
log.info('QEMU VM "{}" started PID={}'.format(self._name, self._process.pid))
self.status = "started"
monitor_process(self._process, self._termination_callback)
except (OSError, subprocess.SubprocessError, UnicodeEncodeError) as e:
stdout = self.read_stdout()
log.error("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout))
raise QemuError("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout))
yield from self._set_process_priority()
if self._cpu_throttling:
self._set_cpu_throttling()
if "-enable-kvm" in command_string:
self._hw_virtualization = True
try:
yield from self.start_wrap_console()
except OSError as e:
raise QemuError("Could not start QEMU console {}\n".format(e)) | Starts this QEMU VM. | Below is the the instruction that describes the task:
### Input:
Starts this QEMU VM.
### Response:
def start(self):
"""
Starts this QEMU VM.
"""
with (yield from self._execute_lock):
if self.is_running():
# resume the VM if it is paused
yield from self.resume()
return
if self._manager.config.get_section_config("Qemu").getboolean("monitor", True):
try:
info = socket.getaddrinfo(self._monitor_host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
if not info:
raise QemuError("getaddrinfo returns an empty list on {}".format(self._monitor_host))
for res in info:
af, socktype, proto, _, sa = res
# let the OS find an unused port for the Qemu monitor
with socket.socket(af, socktype, proto) as sock:
sock.bind(sa)
self._monitor = sock.getsockname()[1]
except OSError as e:
raise QemuError("Could not find free port for the Qemu monitor: {}".format(e))
# check if there is enough RAM to run
self.check_available_ram(self.ram)
command = yield from self._build_command()
command_string = " ".join(shlex.quote(s) for s in command)
try:
log.info("Starting QEMU with: {}".format(command_string))
self._stdout_file = os.path.join(self.working_dir, "qemu.log")
log.info("logging to {}".format(self._stdout_file))
with open(self._stdout_file, "w", encoding="utf-8") as fd:
fd.write("Start QEMU with {}\n\nExecution log:\n".format(command_string))
self.command_line = ' '.join(command)
self._process = yield from asyncio.create_subprocess_exec(*command,
stdout=fd,
stderr=subprocess.STDOUT,
cwd=self.working_dir)
yield from self._start_ubridge()
for adapter_number, adapter in enumerate(self._ethernet_adapters):
nio = adapter.get_nio(0)
if nio:
yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
log.info('QEMU VM "{}" started PID={}'.format(self._name, self._process.pid))
self.status = "started"
monitor_process(self._process, self._termination_callback)
except (OSError, subprocess.SubprocessError, UnicodeEncodeError) as e:
stdout = self.read_stdout()
log.error("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout))
raise QemuError("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout))
yield from self._set_process_priority()
if self._cpu_throttling:
self._set_cpu_throttling()
if "-enable-kvm" in command_string:
self._hw_virtualization = True
try:
yield from self.start_wrap_console()
except OSError as e:
raise QemuError("Could not start QEMU console {}\n".format(e)) |
def generate_format(self):
"""
Means that value have to be in specified format. For example date, email or other.
.. code-block:: python
{'format': 'email'}
Valid value for this definition is user@example.com but not @username
"""
with self.l('if isinstance({variable}, str):'):
format_ = self._definition['format']
if format_ in self.FORMAT_REGEXS:
format_regex = self.FORMAT_REGEXS[format_]
self._generate_format(format_, format_ + '_re_pattern', format_regex)
# format regex is used only in meta schemas
elif format_ == 'regex':
with self.l('try:'):
self.l('re.compile({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be a valid regex")')
else:
self.l('pass') | Means that value have to be in specified format. For example date, email or other.
.. code-block:: python
{'format': 'email'}
Valid value for this definition is user@example.com but not @username | Below is the the instruction that describes the task:
### Input:
Means that value have to be in specified format. For example date, email or other.
.. code-block:: python
{'format': 'email'}
Valid value for this definition is user@example.com but not @username
### Response:
def generate_format(self):
"""
Means that value have to be in specified format. For example date, email or other.
.. code-block:: python
{'format': 'email'}
Valid value for this definition is user@example.com but not @username
"""
with self.l('if isinstance({variable}, str):'):
format_ = self._definition['format']
if format_ in self.FORMAT_REGEXS:
format_regex = self.FORMAT_REGEXS[format_]
self._generate_format(format_, format_ + '_re_pattern', format_regex)
# format regex is used only in meta schemas
elif format_ == 'regex':
with self.l('try:'):
self.l('re.compile({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be a valid regex")')
else:
self.l('pass') |
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
raise IOError('File not found: ' + filepath)
return filepath | Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed. | Below is the the instruction that describes the task:
### Input:
Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
### Response:
def add_extension_if_needed(filepath, ext, check_if_exists=False):
"""Add the extension ext to fpath if it doesn't have it.
Parameters
----------
filepath: str
File name or path
ext: str
File extension
check_if_exists: bool
Returns
-------
File name or path with extension added, if needed.
"""
if not filepath.endswith(ext):
filepath += ext
if check_if_exists:
if not op.exists(filepath):
raise IOError('File not found: ' + filepath)
return filepath |
def _md5_of_file(sub_string):
"""
This will return the md5 of the file in sub_string
:param sub_string: str of the path or relative path to a file
:return: str
"""
md5 = hashlib.md5()
file_path = sub_string
if not os.path.exists(file_path):
file_path = os.path.join(os.environ['CAFE_DATA_DIR_PATH'], file_path)
if not os.path.exists(file_path):
file_path = file_path.replace(' ', '_')
assert (os.path.exists(file_path)), "File %s doesn't exist" % file_path
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
return md5.hexdigest() | This will return the md5 of the file in sub_string
:param sub_string: str of the path or relative path to a file
:return: str | Below is the the instruction that describes the task:
### Input:
This will return the md5 of the file in sub_string
:param sub_string: str of the path or relative path to a file
:return: str
### Response:
def _md5_of_file(sub_string):
"""
This will return the md5 of the file in sub_string
:param sub_string: str of the path or relative path to a file
:return: str
"""
md5 = hashlib.md5()
file_path = sub_string
if not os.path.exists(file_path):
file_path = os.path.join(os.environ['CAFE_DATA_DIR_PATH'], file_path)
if not os.path.exists(file_path):
file_path = file_path.replace(' ', '_')
assert (os.path.exists(file_path)), "File %s doesn't exist" % file_path
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
return md5.hexdigest() |
def combine(self, other, func, fill_value=None):
"""
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = []
for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
new_values.append(func(lv, rv))
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
with np.errstate(all='ignore'):
new_values = [func(lv, other) for lv in self._values]
new_name = self.name
if is_categorical_dtype(self.values):
pass
elif is_extension_array_dtype(self.values):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
new_values = self._values._from_sequence(new_values)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
return self._constructor(new_values, index=new_index, name=new_name) | Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64 | Below is the the instruction that describes the task:
### Input:
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
### Response:
def combine(self, other, func, fill_value=None):
"""
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
Consider 2 Datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
In the previous example, the resulting value for duck is missing,
because the maximum of a NaN and a float is a NaN.
So, in the example, we set ``fill_value=0``,
so the maximum value returned will be the value from some dataset.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = []
for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
new_values.append(func(lv, rv))
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
with np.errstate(all='ignore'):
new_values = [func(lv, other) for lv in self._values]
new_name = self.name
if is_categorical_dtype(self.values):
pass
elif is_extension_array_dtype(self.values):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
new_values = self._values._from_sequence(new_values)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
return self._constructor(new_values, index=new_index, name=new_name) |
def _send(self, packet):
"""Add packet to send queue."""
fut = self.loop.create_future()
self.waiters.append((fut, packet))
if self.waiters and self.in_transaction is False:
self.protocol.send_packet()
return fut | Add packet to send queue. | Below is the the instruction that describes the task:
### Input:
Add packet to send queue.
### Response:
def _send(self, packet):
"""Add packet to send queue."""
fut = self.loop.create_future()
self.waiters.append((fut, packet))
if self.waiters and self.in_transaction is False:
self.protocol.send_packet()
return fut |
def getMeanInpCurrents(params, numunits=100,
filepattern=os.path.join('simulation_output_default',
'population_input_spikes*')):
'''return a dict with the per population mean and std synaptic current,
averaging over numcells recorded units from each population in the
network
Returned currents are in unit of nA.
'''
#convolution kernels
x = np.arange(100) * params.dt
kernel = np.exp(-x / params.model_params['tau_syn_ex'])
#number of external inputs:
K_bg = np.array(sum(params.K_bg, []))
#compensate for DC CC connections if we're using that
iDC = K_bg * params.dc_amplitude * 1E-3 # unit ????
data = {}
#loop over network-populations
for i, Y in enumerate(params.Y):
if i % SIZE == RANK:
#file to open
fname = glob.glob(filepattern+'*' + Y + '*')[0]
print fname
#read in read data and assess units, up to numunits
rawdata = np.array(helpers.read_gdf(fname))
units = np.unique(rawdata[:, 0])
if numunits > units.size:
numcells = units.size
else:
numcells = numunits
units = units[:numcells]
#churn through data and extract the input currents per cell
for j, unit in enumerate(units):
slc = rawdata[:, 0] == unit
#just the spikes:
if j == 0:
dataslc = rawdata[slc, 2:]
else:
dataslc = np.r_['0,3', dataslc, rawdata[slc, 2:]]
#fix the datatype, it may be object
dataslc = dataslc.astype(float)
#fill in data-structure
data.update({
Y : {
'E' : np.convolve(dataslc[:, :, 0].mean(axis=0),
kernel, 'same')*1E-3 + float(iDC[i]),
'I' : np.convolve(dataslc[:, :, 1].mean(axis=0),
kernel, 'same')*1E-3,
'tvec' : rawdata[slc, 1],
'numunits' : numunits,
}
})
data = COMM.allgather(data)
return {k: v for d in data for k, v in d.items()} | return a dict with the per population mean and std synaptic current,
averaging over numcells recorded units from each population in the
network
Returned currents are in unit of nA. | Below is the the instruction that describes the task:
### Input:
return a dict with the per population mean and std synaptic current,
averaging over numcells recorded units from each population in the
network
Returned currents are in unit of nA.
### Response:
def getMeanInpCurrents(params, numunits=100,
filepattern=os.path.join('simulation_output_default',
'population_input_spikes*')):
'''return a dict with the per population mean and std synaptic current,
averaging over numcells recorded units from each population in the
network
Returned currents are in unit of nA.
'''
#convolution kernels
x = np.arange(100) * params.dt
kernel = np.exp(-x / params.model_params['tau_syn_ex'])
#number of external inputs:
K_bg = np.array(sum(params.K_bg, []))
#compensate for DC CC connections if we're using that
iDC = K_bg * params.dc_amplitude * 1E-3 # unit ????
data = {}
#loop over network-populations
for i, Y in enumerate(params.Y):
if i % SIZE == RANK:
#file to open
fname = glob.glob(filepattern+'*' + Y + '*')[0]
print fname
#read in read data and assess units, up to numunits
rawdata = np.array(helpers.read_gdf(fname))
units = np.unique(rawdata[:, 0])
if numunits > units.size:
numcells = units.size
else:
numcells = numunits
units = units[:numcells]
#churn through data and extract the input currents per cell
for j, unit in enumerate(units):
slc = rawdata[:, 0] == unit
#just the spikes:
if j == 0:
dataslc = rawdata[slc, 2:]
else:
dataslc = np.r_['0,3', dataslc, rawdata[slc, 2:]]
#fix the datatype, it may be object
dataslc = dataslc.astype(float)
#fill in data-structure
data.update({
Y : {
'E' : np.convolve(dataslc[:, :, 0].mean(axis=0),
kernel, 'same')*1E-3 + float(iDC[i]),
'I' : np.convolve(dataslc[:, :, 1].mean(axis=0),
kernel, 'same')*1E-3,
'tvec' : rawdata[slc, 1],
'numunits' : numunits,
}
})
data = COMM.allgather(data)
return {k: v for d in data for k, v in d.items()} |
def safeMkdir(p, permissions = permissions755):
'''Wrapper around os.mkdir which does not raise an error if the directory exists.'''
try:
os.mkdir(p)
except OSError:
pass
os.chmod(p, permissions) | Wrapper around os.mkdir which does not raise an error if the directory exists. | Below is the the instruction that describes the task:
### Input:
Wrapper around os.mkdir which does not raise an error if the directory exists.
### Response:
def safeMkdir(p, permissions = permissions755):
'''Wrapper around os.mkdir which does not raise an error if the directory exists.'''
try:
os.mkdir(p)
except OSError:
pass
os.chmod(p, permissions) |
def patch_splitMax(self, patches):
"""Look through the patches and break up any which are longer than the
maximum limit of the match algorithm.
Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
"""
patch_size = self.Match_MaxBits
if patch_size == 0:
# Python has the option of not splitting strings due to its ability
# to handle integers of arbitrary precision.
return
for x in range(len(patches)):
if patches[x].length1 <= patch_size:
continue
bigpatch = patches[x]
# Remove the big old patch.
del patches[x]
x -= 1
start1 = bigpatch.start1
start2 = bigpatch.start2
precontext = ''
while len(bigpatch.diffs) != 0:
# Create one of several smaller patches.
patch = patch_obj()
empty = True
patch.start1 = start1 - len(precontext)
patch.start2 = start2 - len(precontext)
if precontext:
patch.length1 = patch.length2 = len(precontext)
patch.diffs.append((self.DIFF_EQUAL, precontext))
while (len(bigpatch.diffs) != 0 and
patch.length1 < patch_size - self.Patch_Margin):
(diff_type, diff_text) = bigpatch.diffs[0]
if diff_type == self.DIFF_INSERT:
# Insertions are harmless.
patch.length2 += len(diff_text)
start2 += len(diff_text)
patch.diffs.append(bigpatch.diffs.pop(0))
empty = False
elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
patch.diffs[0][0] == self.DIFF_EQUAL and
len(diff_text) > 2 * patch_size):
# This is a large deletion. Let it pass in one chunk.
patch.length1 += len(diff_text)
start1 += len(diff_text)
empty = False
patch.diffs.append((diff_type, diff_text))
del bigpatch.diffs[0]
else:
# Deletion or equality. Only take as much as we can stomach.
diff_text = diff_text[:patch_size - patch.length1 -
self.Patch_Margin]
patch.length1 += len(diff_text)
start1 += len(diff_text)
if diff_type == self.DIFF_EQUAL:
patch.length2 += len(diff_text)
start2 += len(diff_text)
else:
empty = False
patch.diffs.append((diff_type, diff_text))
if diff_text == bigpatch.diffs[0][1]:
del bigpatch.diffs[0]
else:
bigpatch.diffs[0] = (bigpatch.diffs[0][0],
bigpatch.diffs[0][1][len(diff_text):])
# Compute the head context for the next patch.
precontext = self.diff_text2(patch.diffs)
precontext = precontext[-self.Patch_Margin:]
# Append the end context for this patch.
postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
if postcontext:
patch.length1 += len(postcontext)
patch.length2 += len(postcontext)
if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
postcontext)
else:
patch.diffs.append((self.DIFF_EQUAL, postcontext))
if not empty:
x += 1
patches.insert(x, patch) | Look through the patches and break up any which are longer than the
maximum limit of the match algorithm.
Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects. | Below is the the instruction that describes the task:
### Input:
Look through the patches and break up any which are longer than the
maximum limit of the match algorithm.
Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
### Response:
def patch_splitMax(self, patches):
  """Look through the patches and break up any which are longer than the
  maximum limit of the match algorithm.
  Intended to be called only from within patch_apply.

  Args:
    patches: Array of Patch objects.  Modified in place: oversized patches
        are replaced by several smaller ones covering the same span.
  """
  patch_size = self.Match_MaxBits
  if patch_size == 0:
    # Python has the option of not splitting strings due to its ability
    # to handle integers of arbitrary precision.
    return
  for x in range(len(patches)):
    if patches[x].length1 <= patch_size:
      continue
    bigpatch = patches[x]
    # Remove the big old patch.
    del patches[x]
    # Step back one so the replacement patches get inserted in sequence.
    x -= 1
    start1 = bigpatch.start1
    start2 = bigpatch.start2
    precontext = ''
    while len(bigpatch.diffs) != 0:
      # Create one of several smaller patches.
      patch = patch_obj()
      empty = True
      patch.start1 = start1 - len(precontext)
      patch.start2 = start2 - len(precontext)
      if precontext:
        patch.length1 = patch.length2 = len(precontext)
        patch.diffs.append((self.DIFF_EQUAL, precontext))
      # Consume diffs from bigpatch until this sub-patch nears the limit.
      while (len(bigpatch.diffs) != 0 and
             patch.length1 < patch_size - self.Patch_Margin):
        (diff_type, diff_text) = bigpatch.diffs[0]
        if diff_type == self.DIFF_INSERT:
          # Insertions are harmless.
          patch.length2 += len(diff_text)
          start2 += len(diff_text)
          patch.diffs.append(bigpatch.diffs.pop(0))
          empty = False
        elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
              patch.diffs[0][0] == self.DIFF_EQUAL and
              len(diff_text) > 2 * patch_size):
          # This is a large deletion. Let it pass in one chunk.
          patch.length1 += len(diff_text)
          start1 += len(diff_text)
          empty = False
          patch.diffs.append((diff_type, diff_text))
          del bigpatch.diffs[0]
        else:
          # Deletion or equality. Only take as much as we can stomach.
          diff_text = diff_text[:patch_size - patch.length1 -
                                self.Patch_Margin]
          patch.length1 += len(diff_text)
          start1 += len(diff_text)
          if diff_type == self.DIFF_EQUAL:
            patch.length2 += len(diff_text)
            start2 += len(diff_text)
          else:
            empty = False
          patch.diffs.append((diff_type, diff_text))
          if diff_text == bigpatch.diffs[0][1]:
            # The leading diff was consumed entirely.
            del bigpatch.diffs[0]
          else:
            # Trim the consumed prefix off the leading diff.
            bigpatch.diffs[0] = (bigpatch.diffs[0][0],
                                 bigpatch.diffs[0][1][len(diff_text):])
      # Compute the head context for the next patch.
      precontext = self.diff_text2(patch.diffs)
      precontext = precontext[-self.Patch_Margin:]
      # Append the end context for this patch.
      postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
      if postcontext:
        patch.length1 += len(postcontext)
        patch.length2 += len(postcontext)
        if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
          patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
                             postcontext)
        else:
          patch.diffs.append((self.DIFF_EQUAL, postcontext))
      if not empty:
        x += 1
        patches.insert(x, patch)
def cmd_web_report(input_file, verbose, browser):
"""Uses Firefox or Chromium to take a screenshot of the websites.
Makes a report that includes the HTTP headers.
The expected format is one url per line.
Creates a directory called 'report' with the content inside.
\b
$ echo https://www.portantier.com | habu.web.report
"""
urls = input_file.read().decode().strip().split('\n')
report_dir = Path('report')
try:
report_dir.mkdir()
except Exception:
pass
report_file = report_dir / 'index.html'
with report_file.open('w') as outfile:
outfile.write('<!doctype html>\n')
outfile.write('<html lang=en-us>\n')
outfile.write('<meta charset=utf-8>\n')
outfile.write('<title>habu.web.report</title>\n')
outfile.write('<body>\n')
outfile.write('<table border=1 style="max-width: 100%">\n')
for i,url in enumerate(sorted(urls)):
error = False
print(i, url, file=sys.stderr)
outfile.write('<tr>\n')
outfile.write('<td style="vertical-align:top;max-width:30%">\n')
outfile.write('<p><strong>' + html.escape(url) + '</strong></p>\n')
try:
req = urllib.request.Request(url, method='HEAD')
resp = urllib.request.urlopen(req)
outfile.write('<pre style="white-space: pre-wrap;">' + html.escape(str(resp.headers)) + '</pre>\n')
except Exception as e:
outfile.write('<pre>ERROR: ' + html.escape(str(e)) + '</pre>\n')
error = True
outfile.write('</td><td>')
if not error:
web_screenshot(url, report_dir / '{}.png'.format(i), browser=browser)
outfile.write('<img src={}.png style="max-width: 100%" />\n'.format(i))
outfile.write('</td>\n')
outfile.write('</tr>\n')
outfile.write('</table>\n')
outfile.write('</body>\n')
outfile.write('</html>\n') | Uses Firefox or Chromium to take a screenshot of the websites.
Makes a report that includes the HTTP headers.
The expected format is one url per line.
Creates a directory called 'report' with the content inside.
\b
$ echo https://www.portantier.com | habu.web.report | Below is the instruction that describes the task:
### Input:
Uses Firefox or Chromium to take a screenshot of the websites.
Makes a report that includes the HTTP headers.
The expected format is one url per line.
Creates a directory called 'report' with the content inside.
\b
$ echo https://www.portantier.com | habu.web.report
### Response:
def cmd_web_report(input_file, verbose, browser):
    """Uses Firefox or Chromium to take a screenshot of the websites.
    Makes a report that includes the HTTP headers.
    The expected format is one url per line.
    Creates a directory called 'report' with the content inside.
    \b
    $ echo https://www.portantier.com | habu.web.report
    """
    urls = input_file.read().decode().strip().split('\n')

    report_dir = Path('report')
    # exist_ok makes reruns idempotent; real failures (e.g. permissions)
    # now surface instead of being silently swallowed by a bare except.
    report_dir.mkdir(exist_ok=True)

    report_file = report_dir / 'index.html'

    with report_file.open('w') as outfile:

        outfile.write('<!doctype html>\n')
        outfile.write('<html lang=en-us>\n')
        outfile.write('<meta charset=utf-8>\n')
        outfile.write('<title>habu.web.report</title>\n')
        outfile.write('<body>\n')
        outfile.write('<table border=1 style="max-width: 100%">\n')

        for i, url in enumerate(sorted(urls)):
            error = False
            print(i, url, file=sys.stderr)
            outfile.write('<tr>\n')
            outfile.write('<td style="vertical-align:top;max-width:30%">\n')
            outfile.write('<p><strong>' + html.escape(url) + '</strong></p>\n')
            try:
                req = urllib.request.Request(url, method='HEAD')
                # Context manager closes the HTTP response instead of
                # leaking one connection per URL.
                with urllib.request.urlopen(req) as resp:
                    headers = str(resp.headers)
                outfile.write('<pre style="white-space: pre-wrap;">' + html.escape(headers) + '</pre>\n')
            except Exception as e:
                outfile.write('<pre>ERROR: ' + html.escape(str(e)) + '</pre>\n')
                error = True

            outfile.write('</td><td>')
            if not error:
                # Screenshot only URLs whose HEAD request succeeded.
                web_screenshot(url, report_dir / '{}.png'.format(i), browser=browser)
                outfile.write('<img src={}.png style="max-width: 100%" />\n'.format(i))
            outfile.write('</td>\n')
            outfile.write('</tr>\n')

        outfile.write('</table>\n')
        outfile.write('</body>\n')
        outfile.write('</html>\n')
def _symm_current(C):
"""To get rid of NaNs produced by _scalar2array, symmetrize operators
where C_ijkl = C_klij"""
nans = np.isnan(C)
C[nans] = np.einsum('klij', C)[nans]
return C | To get rid of NaNs produced by _scalar2array, symmetrize operators
where C_ijkl = C_klij | Below is the instruction that describes the task:
### Input:
To get rid of NaNs produced by _scalar2array, symmetrize operators
where C_ijkl = C_klij
### Response:
def _symm_current(C):
"""To get rid of NaNs produced by _scalar2array, symmetrize operators
where C_ijkl = C_klij"""
nans = np.isnan(C)
C[nans] = np.einsum('klij', C)[nans]
return C |
def save(self, force_eav=False, **kwargs):
"""
Saves entity instance and creates/updates related attribute instances.
:param eav: if True (default), EAV attributes are saved along with entity.
"""
# save entity
super(BaseEntity, self).save(**kwargs)
# TODO: think about use cases; are we doing it right?
#if not self.check_eav_allowed():
# import warnings
# warnings.warn('EAV attributes are going to be saved along with entity'
# ' despite %s.check_eav_allowed() returned False.'
# % type(self), RuntimeWarning)
# create/update EAV attributes
for schema in self.get_schemata():
value = getattr(self, schema.name, None)
schema.save_attr(self, value) | Saves entity instance and creates/updates related attribute instances.
:param eav: if True (default), EAV attributes are saved along with entity. | Below is the instruction that describes the task:
### Input:
Saves entity instance and creates/updates related attribute instances.
:param eav: if True (default), EAV attributes are saved along with entity.
### Response:
def save(self, force_eav=False, **kwargs):
    """
    Save the entity instance and create/update its attribute instances.

    :param force_eav: accepted for API compatibility; EAV attributes are
        currently always saved along with the entity.
    """
    # Persist the entity itself first so attribute rows can reference it.
    super(BaseEntity, self).save(**kwargs)

    # TODO: decide whether check_eav_allowed() should gate the step below;
    # attributes are currently written unconditionally.

    # Create/update one EAV attribute per schema.
    for schema in self.get_schemata():
        schema.save_attr(self, getattr(self, schema.name, None))
def get_vmpolicy_macaddr_input_vcenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
input = ET.SubElement(get_vmpolicy_macaddr, "input")
vcenter = ET.SubElement(input, "vcenter")
vcenter.text = kwargs.pop('vcenter')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_vmpolicy_macaddr_input_vcenter(self, **kwargs):
    """Auto Generated Code

    Build the <get_vmpolicy_macaddr><input><vcenter> request element and
    dispatch it through the callback.

    :param vcenter: (required, popped from kwargs) vCenter name.
    :param callback: optional override for ``self._callback``.
    :return: whatever the callback returns for the built element.
    """
    # The original created a throwaway ET.Element("config") and immediately
    # rebound the name; build the real root directly instead.
    config = ET.Element("get_vmpolicy_macaddr")
    input_el = ET.SubElement(config, "input")  # renamed: don't shadow builtin
    vcenter = ET.SubElement(input_el, "vcenter")
    vcenter.text = kwargs.pop('vcenter')

    # Look up self._callback lazily so an explicit callback works even when
    # the instance has no _callback attribute (dict.pop evaluates its
    # default eagerly, which the original relied on).
    if 'callback' in kwargs:
        callback = kwargs.pop('callback')
    else:
        callback = self._callback
    return callback(config)
def _check_vmware_workstation_requirements(self, ws_version):
"""
Check minimum requirements to use VMware Workstation.
VIX 1.13 was the release for Workstation 10.
VIX 1.14 was the release for Workstation 11.
VIX 1.15 was the release for Workstation Pro 12.
:param ws_version: VMware Workstation major version.
"""
ws_version = int(ws_version)
if ws_version < 10:
raise VMwareError("Using VMware Workstation requires version 10 or above")
elif ws_version == 10:
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
elif ws_version == 11:
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
elif ws_version >= 12:
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
self._host_type = "ws" | Check minimum requirements to use VMware Workstation.
VIX 1.13 was the release for Workstation 10.
VIX 1.14 was the release for Workstation 11.
VIX 1.15 was the release for Workstation Pro 12.
:param ws_version: VMware Workstation major version. | Below is the instruction that describes the task:
### Input:
Check minimum requirements to use VMware Workstation.
VIX 1.13 was the release for Workstation 10.
VIX 1.14 was the release for Workstation 11.
VIX 1.15 was the release for Workstation Pro 12.
:param ws_version: VMware Workstation major version.
### Response:
def _check_vmware_workstation_requirements(self, ws_version):
"""
Check minimum requirements to use VMware Workstation.
VIX 1.13 was the release for Workstation 10.
VIX 1.14 was the release for Workstation 11.
VIX 1.15 was the release for Workstation Pro 12.
:param ws_version: VMware Workstation major version.
"""
ws_version = int(ws_version)
if ws_version < 10:
raise VMwareError("Using VMware Workstation requires version 10 or above")
elif ws_version == 10:
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
elif ws_version == 11:
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
elif ws_version >= 12:
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
self._host_type = "ws" |
def _read_page_header(file_obj):
"""Read the page_header from the given fo."""
tin = TFileTransport(file_obj)
pin = TCompactProtocolFactory().get_protocol(tin)
page_header = parquet_thrift.PageHeader()
page_header.read(pin)
return page_header | Read the page_header from the given fo. | Below is the instruction that describes the task:
### Input:
Read the page_header from the given fo.
### Response:
def _read_page_header(file_obj):
    """Deserialize and return a Parquet PageHeader from *file_obj*."""
    transport = TFileTransport(file_obj)
    protocol = TCompactProtocolFactory().get_protocol(transport)
    header = parquet_thrift.PageHeader()
    header.read(protocol)
    return header
def publishToOther(self, roomId, name, data):
""" Publish to only other people than myself """
tmpList = self.getRoom(roomId)
# Select everybody except me
userList = [x for x in tmpList if x is not self]
self.publishToRoom(roomId, name, data, userList) | Publish to only other people than myself | Below is the instruction that describes the task:
### Input:
Publish to only other people than myself
### Response:
def publishToOther(self, roomId, name, data):
    """Broadcast *data* to every member of the room except this client."""
    members = self.getRoom(roomId)
    # Identity check: keep everyone who is not this very object.
    others = [member for member in members if member is not self]
    self.publishToRoom(roomId, name, data, others)
def catalog(self):
"""
Read the output catalog produced by the last SExtractor run.
Output is a list of dictionaries, with a dictionary for
each star: {'param1': value, 'param2': value, ...}.
"""
output_f = SExtractorfile(self.config['CATALOG_NAME'], 'r')
c = output_f.read()
output_f.close()
return c | Read the output catalog produced by the last SExtractor run.
Output is a list of dictionaries, with a dictionary for
each star: {'param1': value, 'param2': value, ...}. | Below is the the instruction that describes the task:
### Input:
Read the output catalog produced by the last SExtractor run.
Output is a list of dictionaries, with a dictionary for
each star: {'param1': value, 'param2': value, ...}.
### Response:
def catalog(self):
    """
    Read the output catalog produced by the last SExtractor run.

    Output is a list of dictionaries, with a dictionary for
    each star: {'param1': value, 'param2': value, ...}.
    """
    output_f = SExtractorfile(self.config['CATALOG_NAME'], 'r')
    try:
        return output_f.read()
    finally:
        # Close even when read() raises, so the file handle cannot leak.
        output_f.close()
def to_dict(self, search_fields=None):
"""Builds dict without None object fields"""
fields = self._fields
if search_fields == 'update':
fields = self._search_for_update_fields
elif search_fields == 'all':
fields = self._all_searchable_fields
elif search_fields == 'exclude':
# exclude search fields for update actions,
# but include updateable_search_fields
fields = [field for field in self._fields
if field in self._updateable_search_fields or
field not in self._search_for_update_fields]
return {field: self.field_to_dict(field) for field in fields
if getattr(self, field, None) is not None} | Builds dict without None object fields | Below is the instruction that describes the task:
### Input:
Builds dict without None object fields
### Response:
def to_dict(self, search_fields=None):
    """Serialize the object's fields into a dict, skipping None values.

    ``search_fields`` selects which field list is used:
    'update' -> _search_for_update_fields, 'all' -> _all_searchable_fields,
    'exclude' -> _fields minus the update-search fields (but keeping
    _updateable_search_fields), anything else -> _fields.
    """
    if search_fields == 'update':
        selected = self._search_for_update_fields
    elif search_fields == 'all':
        selected = self._all_searchable_fields
    elif search_fields == 'exclude':
        selected = [
            name for name in self._fields
            if name in self._updateable_search_fields
            or name not in self._search_for_update_fields
        ]
    else:
        selected = self._fields

    result = {}
    for name in selected:
        if getattr(self, name, None) is not None:
            result[name] = self.field_to_dict(name)
    return result
def get_item_admin_session_for_bank(self, bank_id, proxy):
"""Gets the ``OsidSession`` associated with the item admin service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.ItemAdminSession) - ``an
_item_admin_session``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_item_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_admin()`` and ``supports_visible_federation()``
are ``true``.*
"""
if not self.supports_item_admin():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.ItemAdminSession(bank_id, proxy, self._runtime) | Gets the ``OsidSession`` associated with the item admin service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.ItemAdminSession) - ``an
_item_admin_session``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_item_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_admin()`` and ``supports_visible_federation()``
are ``true``.* | Below is the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the item admin service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of the bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.ItemAdminSession) - ``an
_item_admin_session``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_item_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_admin()`` and ``supports_visible_federation()``
are ``true``.*
### Response:
def get_item_admin_session_for_bank(self, bank_id, proxy):
    """Gets the ``OsidSession`` associated with the item admin service for the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of the bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.ItemAdminSession) - ``an
            _item_admin_session``
    raise:  NotFound - ``bank_id`` not found
    raise:  NullArgument - ``bank_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_item_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_item_admin()`` and ``supports_visible_federation()``
    are ``true``.*
    """
    if self.supports_item_admin():
        # TODO: also verify that bank_id exists, raising errors.NotFound.
        # pylint: disable=no-member
        return sessions.ItemAdminSession(bank_id, proxy, self._runtime)
    raise errors.Unimplemented()
def cancel_operation(self, name, options=None):
"""
Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
``google.rpc.Code.UNIMPLEMENTED``. Clients can use
``Operations.GetOperation`` or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an ``Operation.error`` value with a ``google.rpc.Status.code`` of 1,
corresponding to ``Code.CANCELLED``.
Example:
>>> from google.gapic.longrunning import operations_client
>>> api = operations_client.OperationsClient()
>>> name = ''
>>> api.cancel_operation(name)
Args:
name (string): The name of the operation resource to be cancelled.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = operations_pb2.CancelOperationRequest(name=name)
self._cancel_operation(request, options) | Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
``google.rpc.Code.UNIMPLEMENTED``. Clients can use
``Operations.GetOperation`` or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an ``Operation.error`` value with a ``google.rpc.Status.code`` of 1,
corresponding to ``Code.CANCELLED``.
Example:
>>> from google.gapic.longrunning import operations_client
>>> api = operations_client.OperationsClient()
>>> name = ''
>>> api.cancel_operation(name)
Args:
name (string): The name of the operation resource to be cancelled.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
``google.rpc.Code.UNIMPLEMENTED``. Clients can use
``Operations.GetOperation`` or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an ``Operation.error`` value with a ``google.rpc.Status.code`` of 1,
corresponding to ``Code.CANCELLED``.
Example:
>>> from google.gapic.longrunning import operations_client
>>> api = operations_client.OperationsClient()
>>> name = ''
>>> api.cancel_operation(name)
Args:
name (string): The name of the operation resource to be cancelled.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
### Response:
def cancel_operation(self, name, options=None):
    """
    Starts asynchronous cancellation on a long-running operation.

    The server makes a best effort to cancel the operation, but success is
    not guaranteed. If the server doesn't support this method, it returns
    ``google.rpc.Code.UNIMPLEMENTED``. Clients can use
    ``Operations.GetOperation`` or other methods to check whether the
    cancellation succeeded or whether the operation completed despite
    cancellation. On successful cancellation the operation is not deleted;
    instead it becomes an operation with an ``Operation.error`` value whose
    ``google.rpc.Status.code`` is 1, corresponding to ``Code.CANCELLED``.

    Example:
      >>> from google.gapic.longrunning import operations_client
      >>> api = operations_client.OperationsClient()
      >>> name = ''
      >>> api.cancel_operation(name)

    Args:
      name (string): The name of the operation resource to be cancelled.
      options (:class:`google.gax.CallOptions`): Overrides the default
        settings for this call, e.g, timeout, retries etc.

    Raises:
      :exc:`google.gax.errors.GaxError` if the RPC is aborted.
      :exc:`ValueError` if the parameters are invalid.
    """
    # Build the protobuf request and hand it to the transport wrapper.
    self._cancel_operation(operations_pb2.CancelOperationRequest(name=name), options)
def list_files_in_directory(full_directory_path):
"""
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
"""
files = list()
for file_name in __os.listdir(full_directory_path):
if __os.path.isfile(__os.path.join(full_directory_path, file_name)):
files.append(file_name)
return files | List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files | Below is the instruction that describes the task:
### Input:
List the files in a specified directory
Args:
full_directory_path: The full directory path to check, derive from the os module
Returns: returns a list of files
### Response:
def list_files_in_directory(full_directory_path):
    """
    List the files in a specified directory
    Args:
        full_directory_path: The full directory path to check, derive from the os module
    Returns: returns a list of files (regular files only; subdirectories are skipped)
    """
    # Comprehension replaces the manual append loop; same order as listdir().
    return [file_name
            for file_name in __os.listdir(full_directory_path)
            if __os.path.isfile(__os.path.join(full_directory_path, file_name))]
def find_preservation_criteria(self, backups_by_frequency):
"""
Collect the criteria used to decide which backups to preserve.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()` which has been
processed by :func:`apply_rotation_scheme()`.
:returns: A :class:`dict` with :class:`Backup` objects as keys and
:class:`list` objects containing strings (rotation
frequencies) as values.
"""
backups_to_preserve = collections.defaultdict(list)
for frequency, delta in ORDERED_FREQUENCIES:
for period in backups_by_frequency[frequency].values():
for backup in period:
backups_to_preserve[backup].append(frequency)
return backups_to_preserve | Collect the criteria used to decide which backups to preserve.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()` which has been
processed by :func:`apply_rotation_scheme()`.
:returns: A :class:`dict` with :class:`Backup` objects as keys and
:class:`list` objects containing strings (rotation
frequencies) as values. | Below is the instruction that describes the task:
### Input:
Collect the criteria used to decide which backups to preserve.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()` which has been
processed by :func:`apply_rotation_scheme()`.
:returns: A :class:`dict` with :class:`Backup` objects as keys and
:class:`list` objects containing strings (rotation
frequencies) as values.
### Response:
def find_preservation_criteria(self, backups_by_frequency):
    """
    Collect the criteria used to decide which backups to preserve.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()` which has been
                                 processed by :func:`apply_rotation_scheme()`.
    :returns: A :class:`dict` with :class:`Backup` objects as keys and
              :class:`list` objects containing strings (rotation
              frequencies) as values.
    """
    criteria = collections.defaultdict(list)
    # Walk frequencies in their canonical order so each backup's list of
    # rotation frequencies comes out ordered the same way.
    for frequency, _delta in ORDERED_FREQUENCIES:
        for period in backups_by_frequency[frequency].values():
            for backup in period:
                criteria[backup].append(frequency)
    return criteria
def _central_slopes_directions(self, data, dX, dY):
"""
Calculates magnitude/direction of slopes using central difference
"""
shp = np.array(data.shape) - 1
direction = np.full(data.shape, FLAT_ID_INT, 'float64')
mag = np.full(direction, FLAT_ID_INT, 'float64')
ind = 0
d1, d2, theta = _get_d1_d2(dX, dY, ind, [0, 1], [1, 1], shp)
s2 = (data[0:-2, 1:-1] - data[2:, 1:-1]) / d2
s1 = -(data[1:-1, 0:-2] - data[1:-1, 2:]) / d1
direction[1:-1, 1:-1] = np.arctan2(s2, s1) + np.pi
mag = np.sqrt(s1**2 + s2**2)
return mag, direction | Calculates magnitude/direction of slopes using central difference | Below is the instruction that describes the task:
### Input:
Calculates magnitude/direction of slopes using central difference
### Response:
def _central_slopes_directions(self, data, dX, dY):
    """
    Calculates magnitude/direction of slopes using central difference
    """
    shp = np.array(data.shape) - 1

    direction = np.full(data.shape, FLAT_ID_INT, 'float64')
    # BUG FIX: np.full takes a *shape* as its first argument; the original
    # passed the `direction` array itself, which raises a TypeError.
    mag = np.full(data.shape, FLAT_ID_INT, 'float64')

    ind = 0
    d1, d2, theta = _get_d1_d2(dX, dY, ind, [0, 1], [1, 1], shp)
    s2 = (data[0:-2, 1:-1] - data[2:, 1:-1]) / d2
    s1 = -(data[1:-1, 0:-2] - data[1:-1, 2:]) / d1
    direction[1:-1, 1:-1] = np.arctan2(s2, s1) + np.pi
    # NOTE(review): this rebinding replaces the pre-filled full-size array
    # with an interior-sized (n-2, m-2) result, while `direction` stays
    # full-sized -- confirm callers expect the size mismatch.
    mag = np.sqrt(s1**2 + s2**2)
    return mag, direction
def update(uid, post_data, update_time=False):
'''
update the infor.
'''
title = post_data['title'].strip()
if len(title) < 2:
return False
cnt_html = tools.markdown2html(post_data['cnt_md'])
try:
if update_time:
entry2 = TabPost.update(
date=datetime.now(),
time_create=tools.timestamp()
).where(TabPost.uid == uid)
entry2.execute()
except:
pass
cur_rec = MPost.get_by_uid(uid)
entry = TabPost.update(
title=title,
user_name=post_data['user_name'],
cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md'].strip()),
memo=post_data['memo'] if 'memo' in post_data else '',
cnt_html=cnt_html,
logo=post_data['logo'],
order=post_data['order'] if 'order' in post_data else '',
keywords=post_data['keywords'] if 'keywords' in post_data else '',
kind=post_data['kind'] if 'kind' in post_data else 1,
extinfo=post_data['extinfo'] if 'extinfo' in post_data else cur_rec.extinfo,
time_update=tools.timestamp(),
valid=post_data.get('valid', 1)
).where(TabPost.uid == uid)
entry.execute() | update the info. | Below is the instruction that describes the task:
### Input:
update the info.
### Response:
def update(uid, post_data, update_time=False):
    '''
    Update the post record identified by ``uid``.

    :param uid: uid of the TabPost row to update.
    :param post_data: mapping of new field values; ``title`` (at least 2
        characters after stripping) and ``cnt_md`` are required, the other
        keys are optional.
    :param update_time: when True, also reset ``date`` and ``time_create``
        before applying the main update.
    :return: False when the title is too short; otherwise None.
    '''
    title = post_data['title'].strip()
    if len(title) < 2:
        # Reject degenerate titles instead of writing them to the DB.
        return False

    cnt_html = tools.markdown2html(post_data['cnt_md'])
    try:
        if update_time:
            entry2 = TabPost.update(
                date=datetime.now(),
                time_create=tools.timestamp()
            ).where(TabPost.uid == uid)
            entry2.execute()
    except:
        # NOTE(review): bare except silently ignores any failure of the
        # timestamp update -- consider narrowing and logging.
        pass

    cur_rec = MPost.get_by_uid(uid)
    entry = TabPost.update(
        title=title,
        user_name=post_data['user_name'],
        cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md'].strip()),
        memo=post_data['memo'] if 'memo' in post_data else '',
        cnt_html=cnt_html,
        logo=post_data['logo'],
        order=post_data['order'] if 'order' in post_data else '',
        keywords=post_data['keywords'] if 'keywords' in post_data else '',
        kind=post_data['kind'] if 'kind' in post_data else 1,
        # Fall back to the currently stored extinfo rather than clearing it.
        extinfo=post_data['extinfo'] if 'extinfo' in post_data else cur_rec.extinfo,
        time_update=tools.timestamp(),
        valid=post_data.get('valid', 1)
    ).where(TabPost.uid == uid)
    entry.execute()
def download_url(self, timeout=60, name=None):
"""
Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str
"""
if "local" in self.driver.name.lower():
return url_for(SERVER_ENDPOINT,
object_name=self.name,
dl=1,
name=name,
_external=True)
else:
driver_name = self.driver.name.lower()
expires = (datetime.datetime.now()
+ datetime.timedelta(seconds=timeout)).strftime("%s")
if 's3' in driver_name or 'google' in driver_name:
s2s = "GET\n\n\n{expires}\n/{object_name}"\
.format(expires=expires, object_name=self.path)
h = hmac.new(self.driver.secret.encode('utf-8'), s2s.encode('utf-8'), hashlib.sha1)
s = base64.encodestring(h.digest()).strip()
_keyIdName = "AWSAccessKeyId" if "s3" in driver_name else "GoogleAccessId"
params = {
_keyIdName: self.driver.key,
"Expires": expires,
"Signature": s
}
urlkv = urlencode(params)
return "%s?%s" % (self.secure_url, urlkv)
elif 'cloudfiles' in driver_name:
return self.driver.ex_get_object_temp_url(self._obj,
method="GET",
timeout=expires)
else:
raise NotImplemented("This provider '%s' doesn't support or "
"doesn't have a signed url "
"implemented yet" % self.provider_name) | Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str | Below is the instruction that describes the task:
### Input:
Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str
### Response:
def download_url(self, timeout=60, name=None):
"""
Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str
"""
if "local" in self.driver.name.lower():
return url_for(SERVER_ENDPOINT,
object_name=self.name,
dl=1,
name=name,
_external=True)
else:
driver_name = self.driver.name.lower()
expires = (datetime.datetime.now()
+ datetime.timedelta(seconds=timeout)).strftime("%s")
if 's3' in driver_name or 'google' in driver_name:
s2s = "GET\n\n\n{expires}\n/{object_name}"\
.format(expires=expires, object_name=self.path)
h = hmac.new(self.driver.secret.encode('utf-8'), s2s.encode('utf-8'), hashlib.sha1)
s = base64.encodestring(h.digest()).strip()
_keyIdName = "AWSAccessKeyId" if "s3" in driver_name else "GoogleAccessId"
params = {
_keyIdName: self.driver.key,
"Expires": expires,
"Signature": s
}
urlkv = urlencode(params)
return "%s?%s" % (self.secure_url, urlkv)
elif 'cloudfiles' in driver_name:
return self.driver.ex_get_object_temp_url(self._obj,
method="GET",
timeout=expires)
else:
raise NotImplemented("This provider '%s' doesn't support or "
"doesn't have a signed url "
"implemented yet" % self.provider_name) |
def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',
           separateRows=False, prefix='', postfix='', wrapfunc=lambda x: x):
    '''Indents a table by column.
    - rows: A sequence of sequences of items, one sequence per row.
    - hasHeader: True if the first row consists of the columns' names.
    - headerChar: Character to be used for the row separator line
      (if hasHeader==True or separateRows==True).
    - delim: The column delimiter.
    - justify: Determines how are data justified in their column.
      Valid values are 'left','right' and 'center'.
    - separateRows: True if rows are to be separated by a line
      of 'headerChar's.
    - prefix: A string prepended to each printed row.
    - postfix: A string appended to each printed row.
    - wrapfunc: A function f(text) for wrapping text; each element in
      the table is first wrapped by this function.'''
    # NOTE(review): Python 2-only code. `print >> file`, `cStringIO`, and
    # the `map(None, ...)` transpose idiom below do not exist in Python 3.
    # closure for breaking logical rows to physical, using wrapfunc
    def rowWrapper(row):
        newRows = [wrapfunc(item).split('\n') for item in row]
        # Py2 `map(None, *seqs)` transposes with None-padding for ragged
        # rows (like itertools.zip_longest); `substr or ''` turns that
        # padding back into empty strings.
        return [[substr or '' for substr in item] for item in map(None, *newRows)] # NOQA
    # break each logical row into one or more physical ones
    logicalRows = [rowWrapper(row) for row in rows]
    # columns of physical rows
    # Flatten all physical rows, then transpose again to get columns.
    columns = map(None, *reduce(operator.add, logicalRows))
    # get the maximum of each column by the string length of its items
    maxWidths = [max([len(str(item)) for item in column])
                 for column in columns]
    # Separator line spans prefix + all padded columns + delimiters + postfix.
    rowSeparator = headerChar * (len(prefix) + len(postfix) +
                                 sum(maxWidths) +
                                 len(delim)*(len(maxWidths)-1))
    # select the appropriate justify method
    # NOTE: rebinds the `justify` parameter to an unbound str method;
    # raises KeyError for any value other than 'left'/'right'/'center'.
    justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()] # NOQA
    output = cStringIO.StringIO()
    if separateRows:
        print >> output, rowSeparator
    for physicalRows in logicalRows:
        for row in physicalRows:
            print >> output, prefix \
                + delim.join([justify(str(item), width) for (item, width) in zip(row, maxWidths)]) + postfix # NOQA
        if separateRows or hasHeader:
            print >> output, rowSeparator
            # Only the first logical row is the header; stop separating after it
            # unless separateRows was requested.
            hasHeader = False
    return output.getvalue() | Indents a table by column.
- rows: A sequence of sequences of items, one sequence per row.
- hasHeader: True if the first row consists of the columns' names.
- headerChar: Character to be used for the row separator line
(if hasHeader==True or separateRows==True).
- delim: The column delimiter.
- justify: Determines how are data justified in their column.
Valid values are 'left','right' and 'center'.
- separateRows: True if rows are to be separated by a line
of 'headerChar's.
- prefix: A string prepended to each printed row.
- postfix: A string appended to each printed row.
- wrapfunc: A function f(text) for wrapping text; each element in
the table is first wrapped by this function. | Below is the the instruction that describes the task:
### Input:
Indents a table by column.
- rows: A sequence of sequences of items, one sequence per row.
- hasHeader: True if the first row consists of the columns' names.
- headerChar: Character to be used for the row separator line
(if hasHeader==True or separateRows==True).
- delim: The column delimiter.
- justify: Determines how are data justified in their column.
Valid values are 'left','right' and 'center'.
- separateRows: True if rows are to be separated by a line
of 'headerChar's.
- prefix: A string prepended to each printed row.
- postfix: A string appended to each printed row.
- wrapfunc: A function f(text) for wrapping text; each element in
the table is first wrapped by this function.
### Response:
def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',
separateRows=False, prefix='', postfix='', wrapfunc=lambda x: x):
'''Indents a table by column.
- rows: A sequence of sequences of items, one sequence per row.
- hasHeader: True if the first row consists of the columns' names.
- headerChar: Character to be used for the row separator line
(if hasHeader==True or separateRows==True).
- delim: The column delimiter.
- justify: Determines how are data justified in their column.
Valid values are 'left','right' and 'center'.
- separateRows: True if rows are to be separated by a line
of 'headerChar's.
- prefix: A string prepended to each printed row.
- postfix: A string appended to each printed row.
- wrapfunc: A function f(text) for wrapping text; each element in
the table is first wrapped by this function.'''
# closure for breaking logical rows to physical, using wrapfunc
def rowWrapper(row):
newRows = [wrapfunc(item).split('\n') for item in row]
return [[substr or '' for substr in item] for item in map(None, *newRows)] # NOQA
# break each logical row into one or more physical ones
logicalRows = [rowWrapper(row) for row in rows]
# columns of physical rows
columns = map(None, *reduce(operator.add, logicalRows))
# get the maximum of each column by the string length of its items
maxWidths = [max([len(str(item)) for item in column])
for column in columns]
rowSeparator = headerChar * (len(prefix) + len(postfix) +
sum(maxWidths) +
len(delim)*(len(maxWidths)-1))
# select the appropriate justify method
justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()] # NOQA
output = cStringIO.StringIO()
if separateRows:
print >> output, rowSeparator
for physicalRows in logicalRows:
for row in physicalRows:
print >> output, prefix \
+ delim.join([justify(str(item), width) for (item, width) in zip(row, maxWidths)]) + postfix # NOQA
if separateRows or hasHeader:
print >> output, rowSeparator
hasHeader = False
return output.getvalue() |
def parse(text):
    """Parse the given text into metadata and strip it for a Markdown parser.
    :param text: text to be parsed
    """
    metadata = {}
    match = META.match(text)
    while match is not None:
        key, raw_value = match.group(1), match.group(2)
        # Collapse continuation-line indentation into plain newlines.
        metadata[key] = INDENTATION.sub('\n', raw_value.strip())
        # META.match anchors at position 0, so end() == len(group(0)).
        text = text[match.end():]
        match = META.match(text)
    return metadata, text
:param text: text to be parsed | Below is the the instruction that describes the task:
### Input:
Parse the given text into metadata and strip it for a Markdown parser.
:param text: text to be parsed
### Response:
def parse(text):
"""Parse the given text into metadata and strip it for a Markdown parser.
:param text: text to be parsed
"""
rv = {}
m = META.match(text)
while m:
key = m.group(1)
value = m.group(2)
value = INDENTATION.sub('\n', value.strip())
rv[key] = value
text = text[len(m.group(0)):]
m = META.match(text)
return rv, text |
def compare(dicts):
    """Compare by iteration"""
    # Keys shared by every dict in the sequence.
    shared_keys = reduce(lambda left, right: left & right,
                         (set(d) for d in dicts))
    # For each shared key, keep only the values present in every dict.
    return {
        key: list(reduce(lambda left, right: left & right,
                         (set(d[key]) for d in dicts)))
        for key in shared_keys
    }
### Input:
Compare by iteration
### Response:
def compare(dicts):
"""Compare by iteration"""
common_members = {}
common_keys = reduce(lambda x, y: x & y, map(dict.keys, dicts))
for k in common_keys:
common_members[k] = list(
reduce(lambda x, y: x & y, [set(d[k]) for d in dicts]))
return common_members |
def _check_stringify_year_row(self, row_index):
'''
Checks the given row to see if it is labeled year data and fills any blank years within that
data.
'''
table_row = self.table[row_index]
# State trackers
prior_year = None
for column_index in range(self.start[1]+1, self.end[1]):
current_year = table_row[column_index]
# Quit if we see
if not self._check_years(current_year, prior_year):
return
# Only copy when we see a non-empty entry
if current_year:
prior_year = current_year
# If we have a title of years, convert them to strings
self._stringify_row(row_index) | Checks the given row to see if it is labeled year data and fills any blank years within that
data. | Below is the the instruction that describes the task:
### Input:
Checks the given row to see if it is labeled year data and fills any blank years within that
data.
### Response:
def _check_stringify_year_row(self, row_index):
'''
Checks the given row to see if it is labeled year data and fills any blank years within that
data.
'''
table_row = self.table[row_index]
# State trackers
prior_year = None
for column_index in range(self.start[1]+1, self.end[1]):
current_year = table_row[column_index]
# Quit if we see
if not self._check_years(current_year, prior_year):
return
# Only copy when we see a non-empty entry
if current_year:
prior_year = current_year
# If we have a title of years, convert them to strings
self._stringify_row(row_index) |
def metadata(
    self, output_format=None, feature_name_callback=None, **kwargs
):
    """
    Gets SensorML objects for all procedures in your filtered features.
    You should override the default output_format for servers that do not
    respond properly.
    """
    name_of = feature_name_callback or str
    requested_format = output_format
    if requested_format is None:
        requested_format = (
            'text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"'
        )
    results = []
    if self.features is None:
        # Nothing filtered yet: nothing to describe.
        return results
    for feature in self.features:
        # Per-call copy so caller-supplied kwargs are never mutated.
        call_kwargs = dict(kwargs)
        call_kwargs["outputFormat"] = requested_format
        call_kwargs["procedure"] = name_of(feature)
        results.append(
            SensorML(self.server.describe_sensor(**call_kwargs))
        )
    return results
You should override the default output_format for servers that do not
respond properly. | Below is the the instruction that describes the task:
### Input:
Gets SensorML objects for all procedures in your filtered features.
You should override the default output_format for servers that do not
respond properly.
### Response:
def metadata(
self, output_format=None, feature_name_callback=None, **kwargs
):
"""
Gets SensorML objects for all procedures in your filtered features.
You should override the default output_format for servers that do not
respond properly.
"""
callback = feature_name_callback or str
if output_format is None:
output_format = (
'text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"'
)
responses = []
if self.features is not None:
for feature in self.features:
ds_kwargs = kwargs.copy()
ds_kwargs.update(
{
"outputFormat": output_format,
"procedure": callback(feature),
}
)
responses.append(
SensorML(self.server.describe_sensor(**ds_kwargs))
)
return responses |
def build_pre_approval_payment_params(self, **kwargs):
    """ build a dict with params """
    params = kwargs or {}
    params['reference'] = self.reference
    params['preApprovalCode'] = self.code
    # (request-parameter prefix, item-dict key) pairs, in emission order.
    field_map = (
        ('itemId', 'id'),
        ('itemDescription', 'description'),
        ('itemAmount', 'amount'),
        ('itemQuantity', 'quantity'),
        ('itemWeight', 'weight'),
        ('itemShippingCost', 'shipping_cost'),
    )
    for index, item in enumerate(self.items, 1):
        for param_name, item_key in field_map:
            params['%s%s' % (param_name, index)] = item.get(item_key)
    self.data.update(params)
    self.clean_none_params()
### Input:
build a dict with params
### Response:
def build_pre_approval_payment_params(self, **kwargs):
""" build a dict with params """
params = kwargs or {}
params['reference'] = self.reference
params['preApprovalCode'] = self.code
for i, item in enumerate(self.items, 1):
params['itemId%s' % i] = item.get('id')
params['itemDescription%s' % i] = item.get('description')
params['itemAmount%s' % i] = item.get('amount')
params['itemQuantity%s' % i] = item.get('quantity')
params['itemWeight%s' % i] = item.get('weight')
params['itemShippingCost%s' % i] = item.get('shipping_cost')
self.data.update(params)
self.clean_none_params() |
def replace_label(self, oldLabel, newLabel):
    """ Replaces every whole-word occurrence of oldLabel in self.asm
    with newLabel. No-op when both labels are equal.
    :param oldLabel: label to search for; matched literally as a whole word
    :param newLabel: replacement text
    """
    if oldLabel == newLabel:
        return
    # BUG FIX: escape the label so regex metacharacters commonly found in
    # assembler labels (e.g. '$', '.', '?') are matched literally instead
    # of being interpreted as regex syntax.
    tmp = re.compile(r'\b' + re.escape(oldLabel) + r'\b')
    last = 0
    l = len(newLabel)
    while True:
        match = tmp.search(self.asm[last:])
        if not match:
            break
        txt = self.asm
        self.asm = txt[:last + match.start()] + newLabel + txt[last + match.end():]
        # Resume scanning just past the inserted text so a newLabel that
        # contains oldLabel cannot be matched again (no infinite loop).
        last += match.start() + l
### Input:
Replaces old label with a new one
### Response:
def replace_label(self, oldLabel, newLabel):
""" Replaces old label with a new one
"""
if oldLabel == newLabel:
return
tmp = re.compile(r'\b' + oldLabel + r'\b')
last = 0
l = len(newLabel)
while True:
match = tmp.search(self.asm[last:])
if not match:
break
txt = self.asm
self.asm = txt[:last + match.start()] + newLabel + txt[last + match.end():]
last += match.start() + l |
def _filter_subgraph(self, subgraph, predicate):
"""
Given a subgraph of the manifest, and a predicate, filter
the subgraph using that predicate. Generates a list of nodes.
"""
to_return = []
for unique_id, item in subgraph.items():
if predicate(item):
to_return.append(item)
return to_return | Given a subgraph of the manifest, and a predicate, filter
the subgraph using that predicate. Generates a list of nodes. | Below is the the instruction that describes the task:
### Input:
Given a subgraph of the manifest, and a predicate, filter
the subgraph using that predicate. Generates a list of nodes.
### Response:
def _filter_subgraph(self, subgraph, predicate):
"""
Given a subgraph of the manifest, and a predicate, filter
the subgraph using that predicate. Generates a list of nodes.
"""
to_return = []
for unique_id, item in subgraph.items():
if predicate(item):
to_return.append(item)
return to_return |
def compare_orderings(info_frags_records, linkage_orderings):
    """Given linkage groups and info_frags records, link pseudo-chromosomes to
    scaffolds based on the initial contig composition of each group. Because
    info_frags records are usually richer and may contain contigs not found
    in linkage groups, those extra sequences are discarded.
    Example with two linkage groups and two chromosomes:
    >>> linkage_orderings = {
    ...     'linkage_group_1': [
    ...         ['sctg_516', -3, 36614, 50582, 1],
    ...         ['sctg_486', -3, 41893, 41893, 1],
    ...         ['sctg_486', -3, 50054, 62841, 1],
    ...         ['sctg_207', -3, 31842, 94039, 1],
    ...         ['sctg_558', -3, 51212, 54212, 1],
    ...     ],
    ...     'linkage_group_2': [
    ...         ['sctg_308', -3, 15892, 25865, 1],
    ...         ['sctg_842', -3, 0, 8974, 1],
    ...         ['sctg_994', -3, 0, 81213, 1],
    ...     ],
    ... }
    >>> info_frags = {
    ...     'scaffold_A': [
    ...         ['sctg_308', 996, 15892, 25865, 1],
    ...         ['sctg_778', 1210, 45040, 78112, -1],
    ...         ['sctg_842', 124, 0, 8974, 1],
    ...     ],
    ...     'scaffold_B': [
    ...         ['sctg_516', 5, 0, 38000, 1],
    ...         ['sctg_486', 47, 42050, 49000, 1],
    ...         ['sctg_1755', 878, 95001, 10844, -1],
    ...         ['sctg_842', 126, 19000, 26084, 1],
    ...         ['sctg_207', 705, 45500, 87056, 1],
    ...     ],
    ...     'scaffold_C': [
    ...         ['sctg_558', 745, 50045, 67851, 1],
    ...         ['sctg_994', 12, 74201, 86010, -1],
    ...     ],
    ... }
    >>> matching_pairs = compare_orderings(info_frags, linkage_orderings)
    >>> matching_pairs['scaffold_B']
    (3, 'linkage_group_1', {'sctg_558': 'sctg_207'})
    >>> matching_pairs['scaffold_A']
    (2, 'linkage_group_2', {'sctg_994': 'sctg_842'})
    """
    scaffolds = info_frags_records.keys()
    linkage_groups = linkage_orderings.keys()
    best_matching_table = dict()
    for scaffold, linkage_group in itertools.product(
        scaffolds, linkage_groups
    ):
        # Collapse consecutive duplicate contig names into the group's
        # contig ordering (groupby only merges *adjacent* equal keys).
        lg_ordering = [
            init_contig
            for init_contig, _ in itertools.groupby(
                linkage_orderings[linkage_group], operator.itemgetter(0)
            )
        ]
        # Scaffold contig ordering, restricted to contigs the group knows.
        scaffold_ordering = [
            init_contig
            for init_contig, bin_group in itertools.groupby(
                info_frags_records[scaffold], operator.itemgetter(0)
            )
            if init_contig in lg_ordering
        ]
        overlap = set(lg_ordering).intersection(scaffold_ordering)
        # For each group contig absent from the scaffold, record which
        # contig precedes it in the group ordering.
        missing_locations = dict()
        for missing_block in sorted(set(lg_ordering) - overlap):
            for i, init_contig in enumerate(lg_ordering):
                if init_contig == missing_block:
                    # BUG FIX: the original used lg_ordering[i - 1] inside a
                    # try/except IndexError, but i == 0 yields index -1 which
                    # silently wraps to the LAST element instead of raising,
                    # so "beginning" was never reported. Test the index
                    # explicitly instead.
                    missing_locations[missing_block] = (
                        "beginning" if i == 0 else lg_ordering[i - 1]
                    )
        # Keep the linkage group with the largest overlap; ties keep the
        # first group encountered (same as the original try/except logic).
        current_best = best_matching_table.get(scaffold)
        if current_best is None or len(overlap) > current_best[0]:
            best_matching_table[scaffold] = (
                len(overlap),
                linkage_group,
                missing_locations,
            )
    return best_matching_table
scaffolds based on the initial contig composition of each group. Because
info_frags records are usually richer and may contain contigs not found
in linkage groups, those extra sequences are discarded.
Example with two linkage groups and two chromosomes:
>>> linkage_orderings = {
... 'linkage_group_1': [
... ['sctg_516', -3, 36614, 50582, 1],
... ['sctg_486', -3, 41893, 41893, 1],
... ['sctg_486', -3, 50054, 62841, 1],
... ['sctg_207', -3, 31842, 94039, 1],
... ['sctg_558', -3, 51212, 54212, 1],
... ],
... 'linkage_group_2': [
... ['sctg_308', -3, 15892, 25865, 1],
... ['sctg_842', -3, 0, 8974, 1],
... ['sctg_994', -3, 0, 81213, 1],
... ],
... }
>>> info_frags = {
... 'scaffold_A': [
... ['sctg_308', 996, 15892, 25865, 1],
... ['sctg_778', 1210, 45040, 78112, -1],
... ['sctg_842', 124, 0, 8974, 1],
... ],
... 'scaffold_B': [
... ['sctg_516', 5, 0, 38000, 1],
... ['sctg_486', 47, 42050, 49000, 1],
... ['sctg_1755', 878, 95001, 10844, -1],
... ['sctg_842', 126, 19000, 26084, 1],
... ['sctg_207', 705, 45500, 87056, 1],
... ],
... 'scaffold_C': [
... ['sctg_558', 745, 50045, 67851, 1],
... ['sctg_994', 12, 74201, 86010, -1],
... ],
... }
>>> matching_pairs = compare_orderings(info_frags, linkage_orderings)
>>> matching_pairs['scaffold_B']
(3, 'linkage_group_1', {'sctg_558': 'sctg_207'})
>>> matching_pairs['scaffold_A']
(2, 'linkage_group_2', {'sctg_994': 'sctg_842'}) | Below is the the instruction that describes the task:
### Input:
Given linkage groups and info_frags records, link pseudo-chromosomes to
scaffolds based on the initial contig composition of each group. Because
info_frags records are usually richer and may contain contigs not found
in linkage groups, those extra sequences are discarded.
Example with two linkage groups and two chromosomes:
>>> linkage_orderings = {
... 'linkage_group_1': [
... ['sctg_516', -3, 36614, 50582, 1],
... ['sctg_486', -3, 41893, 41893, 1],
... ['sctg_486', -3, 50054, 62841, 1],
... ['sctg_207', -3, 31842, 94039, 1],
... ['sctg_558', -3, 51212, 54212, 1],
... ],
... 'linkage_group_2': [
... ['sctg_308', -3, 15892, 25865, 1],
... ['sctg_842', -3, 0, 8974, 1],
... ['sctg_994', -3, 0, 81213, 1],
... ],
... }
>>> info_frags = {
... 'scaffold_A': [
... ['sctg_308', 996, 15892, 25865, 1],
... ['sctg_778', 1210, 45040, 78112, -1],
... ['sctg_842', 124, 0, 8974, 1],
... ],
... 'scaffold_B': [
... ['sctg_516', 5, 0, 38000, 1],
... ['sctg_486', 47, 42050, 49000, 1],
... ['sctg_1755', 878, 95001, 10844, -1],
... ['sctg_842', 126, 19000, 26084, 1],
... ['sctg_207', 705, 45500, 87056, 1],
... ],
... 'scaffold_C': [
... ['sctg_558', 745, 50045, 67851, 1],
... ['sctg_994', 12, 74201, 86010, -1],
... ],
... }
>>> matching_pairs = compare_orderings(info_frags, linkage_orderings)
>>> matching_pairs['scaffold_B']
(3, 'linkage_group_1', {'sctg_558': 'sctg_207'})
>>> matching_pairs['scaffold_A']
(2, 'linkage_group_2', {'sctg_994': 'sctg_842'})
### Response:
def compare_orderings(info_frags_records, linkage_orderings):
"""Given linkage groups and info_frags records, link pseudo-chromosomes to
scaffolds based on the initial contig composition of each group. Because
info_frags records are usually richer and may contain contigs not found
in linkage groups, those extra sequences are discarded.
Example with two linkage groups and two chromosomes:
>>> linkage_orderings = {
... 'linkage_group_1': [
... ['sctg_516', -3, 36614, 50582, 1],
... ['sctg_486', -3, 41893, 41893, 1],
... ['sctg_486', -3, 50054, 62841, 1],
... ['sctg_207', -3, 31842, 94039, 1],
... ['sctg_558', -3, 51212, 54212, 1],
... ],
... 'linkage_group_2': [
... ['sctg_308', -3, 15892, 25865, 1],
... ['sctg_842', -3, 0, 8974, 1],
... ['sctg_994', -3, 0, 81213, 1],
... ],
... }
>>> info_frags = {
... 'scaffold_A': [
... ['sctg_308', 996, 15892, 25865, 1],
... ['sctg_778', 1210, 45040, 78112, -1],
... ['sctg_842', 124, 0, 8974, 1],
... ],
... 'scaffold_B': [
... ['sctg_516', 5, 0, 38000, 1],
... ['sctg_486', 47, 42050, 49000, 1],
... ['sctg_1755', 878, 95001, 10844, -1],
... ['sctg_842', 126, 19000, 26084, 1],
... ['sctg_207', 705, 45500, 87056, 1],
... ],
... 'scaffold_C': [
... ['sctg_558', 745, 50045, 67851, 1],
... ['sctg_994', 12, 74201, 86010, -1],
... ],
... }
>>> matching_pairs = compare_orderings(info_frags, linkage_orderings)
>>> matching_pairs['scaffold_B']
(3, 'linkage_group_1', {'sctg_558': 'sctg_207'})
>>> matching_pairs['scaffold_A']
(2, 'linkage_group_2', {'sctg_994': 'sctg_842'})
"""
scaffolds = info_frags_records.keys()
linkage_groups = linkage_orderings.keys()
best_matching_table = dict()
for scaffold, linkage_group in itertools.product(
scaffolds, linkage_groups
):
lg_ordering = [
init_contig
for init_contig, _ in itertools.groupby(
linkage_orderings[linkage_group], operator.itemgetter(0)
)
]
scaffold_ordering = [
init_contig
for init_contig, bin_group in itertools.groupby(
info_frags_records[scaffold], operator.itemgetter(0)
)
if init_contig in lg_ordering
]
overlap = set(lg_ordering).intersection(set(scaffold_ordering))
missing_locations = dict()
for missing_block in sorted(set(lg_ordering) - set(overlap)):
for i, init_contig in enumerate(lg_ordering):
if init_contig == missing_block:
try:
block_before = lg_ordering[i - 1]
except IndexError:
block_before = "beginning"
missing_locations[missing_block] = block_before
try:
if len(overlap) > best_matching_table[scaffold][0]:
best_matching_table[scaffold] = (
len(overlap),
linkage_group,
missing_locations,
)
except KeyError:
best_matching_table[scaffold] = (
len(overlap),
linkage_group,
missing_locations,
)
return best_matching_table |
def rollback(using=None):
    """
    This function does the rollback itself and resets the dirty flag.
    """
    if using is not None:
        # Roll back only the named connection.
        tldap.backend.connections[using].rollback()
        return
    # No alias given: roll back every registered connection.
    for alias in tldap.backend.connections:
        tldap.backend.connections[alias].rollback()
### Input:
This function does the rollback itself and resets the dirty flag.
### Response:
def rollback(using=None):
"""
This function does the rollback itself and resets the dirty flag.
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.rollback()
return
connection = tldap.backend.connections[using]
connection.rollback() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.