_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q257600 | TLSSession._obtain_credentials | validation | def _obtain_credentials(self):
"""
Obtains a credentials handle from secur32.dll for use with SChannel
"""
protocol_values = {
'SSLv3': Secur32Const.SP_PROT_SSL3_CLIENT,
'TLSv1': Secur32Const.SP_PROT_TLS1_CLIENT,
'TLSv1.1': Secur32Const.SP_PROT_TLS1_1_CLIENT,
'TLSv1.2': Secur32Const.SP_PROT_TLS1_2_CLIENT,
}
protocol_bit_mask = 0
for key, value in protocol_values.items():
if key in self._protocols:
protocol_bit_mask |= value
algs = [
Secur32Const.CALG_AES_128,
Secur32Const.CALG_AES_256,
Secur32Const.CALG_3DES,
Secur32Const.CALG_SHA1,
Secur32Const.CALG_ECDHE,
Secur32Const.CALG_DH_EPHEM,
Secur32Const.CALG_RSA_KEYX,
Secur32Const.CALG_RSA_SIGN,
Secur32Const.CALG_ECDSA,
Secur32Const.CALG_DSS_SIGN,
]
if 'TLSv1.2' in self._protocols:
algs.extend([
Secur32Const.CALG_SHA512,
Secur32Const.CALG_SHA384,
Secur32Const.CALG_SHA256,
])
alg_array = new(secur32, 'ALG_ID[%s]' % len(algs))
for index, alg in enumerate(algs):
alg_array[index] = alg
flags = Secur32Const.SCH_USE_STRONG_CRYPTO | Secur32Const.SCH_CRED_NO_DEFAULT_CREDS
if not self._manual_validation and not self._extra_trust_roots:
flags |= Secur32Const.SCH_CRED_AUTO_CRED_VALIDATION
else:
flags |= Secur32Const.SCH_CRED_MANUAL_CRED_VALIDATION
schannel_cred_pointer = struct(secur32, 'SCHANNEL_CRED')
schannel_cred = | python | {
"resource": ""
} |
q257601 | TLSSocket._create_buffers | validation | def _create_buffers(self, number):
"""
Creates a SecBufferDesc struct and contained SecBuffer structs
:param number:
    The number of contained SecBuffer objects to create
:return:
A tuple of (SecBufferDesc pointer, SecBuffer array)
"""
buffers = new(secur32, 'SecBuffer[%d]' % number)
| python | {
"resource": ""
} |
q257602 | TLSSocket.select_read | validation | def select_read(self, timeout=None):
"""
Blocks until the socket is ready to be read from, or the timeout is hit
:param timeout:
A float - the period of time to wait for data to be read. None for
no time limit.
:return:
A boolean - if data is ready to be read. Will only be False if
timeout is not None.
"""
# If | python | {
"resource": ""
} |
q257603 | TLSSocket.read_exactly | validation | def read_exactly(self, num_bytes):
"""
Reads exactly the specified number of bytes from the socket
:param num_bytes:
An integer - the exact number of bytes to read
| python | {
"resource": ""
} |
q257604 | TLSSocket.select_write | validation | def select_write(self, timeout=None):
"""
Blocks until the socket is ready to be written to, or the timeout is hit
:param timeout:
A float - the period of time to wait for the socket to be ready to
written to. None for no time limit.
:return:
| python | {
"resource": ""
} |
q257605 | TLSSocket._raw_read | validation | def _raw_read(self):
"""
Reads data from the socket and writes it to the memory bio
used by libssl to decrypt the data. Returns the unencrypted
data for the purpose of debugging handshakes.
:return:
A byte string of ciphertext from the socket. Used for
debugging the handshake only.
"""
data = self._raw_bytes
try:
data += self._socket.recv(8192)
| python | {
"resource": ""
} |
q257606 | TLSSocket._raw_write | validation | def _raw_write(self):
"""
Takes ciphertext from the memory bio and writes it to the
socket.
:return:
A byte string of ciphertext going to the socket. Used
for debugging the handshake only.
"""
data_available = libssl.BIO_ctrl_pending(self._wbio)
if data_available == 0:
return b''
to_read = min(self._buffer_size, data_available)
read = libssl.BIO_read(self._wbio, self._bio_write_buffer, to_read)
to_write = bytes_from_buffer(self._bio_write_buffer, read)
output = to_write
while len(to_write):
raise_disconnect = False
try:
| python | {
"resource": ""
} |
q257607 | _advapi32_encrypt | validation | def _advapi32_encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext via CryptoAPI
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
context_handle = None
key_handle = None
try:
context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)
| python | {
"resource": ""
} |
q257608 | _bcrypt_encrypt | validation | def _bcrypt_encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext via CNG
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
key_handle = None
try:
key_handle = _bcrypt_create_key_handle(cipher, key)
if iv is None:
iv_len = 0
else:
iv_len = len(iv)
flags = 0
if padding is True:
flags = BcryptConst.BCRYPT_BLOCK_PADDING
out_len = new(bcrypt, 'ULONG *')
res = bcrypt.BCryptEncrypt(
key_handle,
data,
len(data),
null(),
null(),
0,
null(),
0,
out_len,
flags
)
| python | {
"resource": ""
} |
q257609 | handle_openssl_error | validation | def handle_openssl_error(result, exception_class=None):
"""
    Checks if an error occurred, and if so throws an OSError containing the
last OpenSSL error message
:param result:
An integer result code - 1 or greater indicates success
:param exception_class:
The exception class to use for the exception if an error occurred
:raises:
OSError - when an OpenSSL error occurs
"""
if result > 0:
return
if exception_class is None:
exception_class = OSError
| python | {
"resource": ""
} |
q257610 | peek_openssl_error | validation | def peek_openssl_error():
"""
Peeks into the error stack and pulls out the lib, func and reason
:return:
A three-element tuple of integers (lib, func, reason)
"""
error = libcrypto.ERR_peek_error() | python | {
"resource": ""
} |
q257611 | extract_from_system | validation | def extract_from_system(cert_callback=None, callback_only_on_failure=False):
"""
Extracts trusted CA certificates from the OS X trusted root keychain.
:param cert_callback:
A callback that is called once for each certificate in the trust store.
It should accept two parameters: an asn1crypto.x509.Certificate object,
and a reason. The reason will be None if the certificate is being
exported, otherwise it will be a unicode string of the reason it won't.
:param callback_only_on_failure:
A boolean - if the callback should only be called when a certificate is
not exported.
:raises:
OSError - when an error is returned by the OS crypto library
:return:
A list of 3-element tuples:
- 0: a byte string of a DER-encoded certificate
- 1: a set of unicode strings that are OIDs of purposes to trust the
certificate for
- 2: a set of unicode strings that are OIDs of purposes to reject the
certificate for
"""
certs_pointer_pointer = new(CoreFoundation, 'CFArrayRef *')
res = Security.SecTrustCopyAnchorCertificates(certs_pointer_pointer)
handle_sec_error(res)
certs_pointer = unwrap(certs_pointer_pointer)
certificates = {}
trust_info = {}
all_purposes = '2.5.29.37.0'
default_trust = (set(), set())
length = CoreFoundation.CFArrayGetCount(certs_pointer)
for index in range(0, length):
cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(certs_pointer, index)
der_cert, cert_hash = _cert_details(cert_pointer)
certificates[cert_hash] = der_cert
CoreFoundation.CFRelease(certs_pointer)
for domain in [SecurityConst.kSecTrustSettingsDomainUser, SecurityConst.kSecTrustSettingsDomainAdmin]:
cert_trust_settings_pointer_pointer = new(CoreFoundation, 'CFArrayRef *')
res = Security.SecTrustSettingsCopyCertificates(domain, cert_trust_settings_pointer_pointer)
if res == SecurityConst.errSecNoTrustSettings:
continue
handle_sec_error(res)
cert_trust_settings_pointer = unwrap(cert_trust_settings_pointer_pointer)
length = CoreFoundation.CFArrayGetCount(cert_trust_settings_pointer)
for index in range(0, length):
cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(cert_trust_settings_pointer, index)
trust_settings_pointer_pointer = new(CoreFoundation, 'CFArrayRef *')
res = | python | {
"resource": ""
} |
q257612 | _cert_callback | validation | def _cert_callback(callback, der_cert, reason):
"""
Constructs an asn1crypto.x509.Certificate object and calls the export
callback
:param callback:
The callback to call
:param der_cert:
A byte string of the DER-encoded certificate
:param reason:
None if cert | python | {
"resource": ""
} |
q257613 | _cert_details | validation | def _cert_details(cert_pointer):
"""
Return the certificate and a hash of it
:param cert_pointer:
A SecCertificateRef
:return:
A 2-element tuple:
         - [0]: A byte string of the DER-encoded contents of the cert
         - [1]: A byte string of the SHA1 hash of the cert
"""
data_pointer = None
try:
data_pointer = | python | {
"resource": ""
} |
q257614 | _extract_error | validation | def _extract_error():
"""
Extracts the last OS error message into a python unicode string
:return:
A unicode string error message
"""
error_num = errno()
try:
error_string = os.strerror(error_num)
except (ValueError):
return | python | {
"resource": ""
} |
q257615 | CFHelpers.cf_dictionary_to_dict | validation | def cf_dictionary_to_dict(dictionary):
"""
Converts a CFDictionary object into a python dictionary
:param dictionary:
The CFDictionary to convert
:return:
A python dict
"""
dict_length = CoreFoundation.CFDictionaryGetCount(dictionary)
keys = (CFTypeRef * dict_length)()
values = (CFTypeRef * dict_length)()
CoreFoundation.CFDictionaryGetKeysAndValues( | python | {
"resource": ""
} |
q257616 | handle_sec_error | validation | def handle_sec_error(error, exception_class=None):
"""
Checks a Security OSStatus error code and throws an exception if there is an
error to report
:param error:
An OSStatus
:param exception_class:
The exception class to use for the exception if an error occurred
:raises:
OSError - when the OSStatus contains an error
"""
if error == 0:
return
if error in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):
raise TLSDisconnectError('The remote end closed the connection')
if error == SecurityConst.errSSLClosedGraceful:
raise TLSGracefulDisconnectError('The remote | python | {
"resource": ""
} |
q257617 | _get_func_info | validation | def _get_func_info(docstring, def_lineno, code_lines, prefix):
"""
Extracts the function signature and description of a Python function
:param docstring:
A unicode string of the docstring for the function
:param def_lineno:
An integer line number that function was defined on
:param code_lines:
A list of unicode string lines from the source file the function was
defined in
:param prefix:
A prefix to prepend to all output lines
:return:
A 2-element tuple:
- [0] A unicode string of the function signature with a docstring of
parameter info
- [1] A markdown snippet of the function description
"""
def_index = def_lineno - 1
definition = code_lines[def_index]
definition = definition.rstrip()
while not definition.endswith(':'):
def_index += 1
definition += '\n' + code_lines[def_index].rstrip()
definition = textwrap.dedent(definition).rstrip(':')
definition = definition.replace('\n', '\n' + prefix)
description = ''
found_colon = False
params = ''
for line in docstring.splitlines():
if line and line[0] == ':':
found_colon = True
if not found_colon:
if description:
description += | python | {
"resource": ""
} |
q257618 | _find_sections | validation | def _find_sections(md_ast, sections, last, last_class, total_lines=None):
"""
Walks through a CommonMark AST to find section headers that delineate
content that should be updated by this script
:param md_ast:
The AST of the markdown document
:param sections:
A dict to store the start and end lines of a section. The key will be
a two-element tuple of the section type ("class", "function",
"method" or "attribute") and identifier. The values are a two-element
tuple of the start and end line number in the markdown document of the
section.
:param last:
A dict containing information about the last section header seen.
Includes the keys "type_name", "identifier", "start_line".
:param last_class:
A unicode string of the name of the last class found - used when
processing methods and attributes.
:param total_lines:
An integer of the total number of lines in the markdown document -
used to work around a bug in the API of the Python port of CommonMark
"""
def child_walker(node):
for child, entering in node.walker():
if child == node:
continue
yield child, entering
for child, entering in child_walker(md_ast):
if child.t == 'heading':
start_line = child.sourcepos[0][0]
if child.level == 2:
if last:
sections[(last['type_name'], last['identifier'])] = (last['start_line'], start_line - 1)
last.clear()
if child.level in set([3, 5]):
heading_elements = []
for heading_child, _ in child_walker(child):
heading_elements.append(heading_child)
if len(heading_elements) != 2:
continue
first = heading_elements[0]
second = heading_elements[1]
if first.t != 'code':
| python | {
"resource": ""
} |
q257619 | walk_ast | validation | def walk_ast(node, code_lines, sections, md_chunks):
"""
A callback used to walk the Python AST looking for classes, functions,
methods and attributes. Generates chunks of markdown markup to replace
the existing content.
:param node:
An _ast module node object
:param code_lines:
A list of unicode strings - the source lines of the Python file
:param sections:
A dict of markdown document sections that need to be updated. The key
will be a two-element tuple of the section type ("class", "function",
"method" or "attribute") and identifier. The values are a two-element
tuple of the start and end line number in the markdown document of the
section.
:param md_chunks:
A dict with keys from the sections param and the values being a unicode
string containing a chunk of markdown markup.
"""
if isinstance(node, _ast.FunctionDef):
key = ('function', node.name)
if key not in sections:
return
docstring = ast.get_docstring(node)
def_lineno = node.lineno + len(node.decorator_list)
definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '> ')
md_chunk = textwrap.dedent("""
### `%s()` function
> ```python
> %s
> ```
>
%s
""").strip() % (
node.name,
definition,
description_md
) + "\n"
md_chunks[key] = md_chunk.replace('>\n\n', '')
elif isinstance(node, _ast.ClassDef):
if ('class', node.name) not in sections:
return
for subnode in node.body:
if isinstance(subnode, _ast.FunctionDef):
node_id = node.name + '.' + subnode.name
method_key = ('method', node_id)
is_method = method_key in sections
attribute_key = ('attribute', node_id)
is_attribute = attribute_key in sections
is_constructor = subnode.name == '__init__'
if not is_constructor and not is_attribute and not is_method:
continue
docstring = ast.get_docstring(subnode)
def_lineno = subnode.lineno + len(subnode.decorator_list)
if not docstring:
continue
if is_method or is_constructor:
definition, description_md = _get_func_info(docstring, def_lineno, code_lines, '> > ')
if is_constructor:
key = ('class', node.name)
class_docstring = ast.get_docstring(node) or ''
class_description = textwrap.dedent(class_docstring).strip()
if class_description:
class_description_md = "> %s\n>" % (class_description.replace("\n", "\n> "))
else:
class_description_md = ''
md_chunk = textwrap.dedent("""
### `%s()` class
%s
> ##### constructor
>
> > ```python
| python | {
"resource": ""
} |
q257620 | system_path | validation | def system_path():
"""
Tries to find a CA certs bundle in common locations
:raises:
OSError - when no valid CA certs bundle was found on the filesystem
:return:
The full filesystem path to a CA certs bundle file
"""
ca_path = None
# Common CA cert paths
paths = [
'/usr/lib/ssl/certs/ca-certificates.crt',
'/etc/ssl/certs/ca-certificates.crt',
'/etc/ssl/certs/ca-bundle.crt',
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/ssl/ca-bundle.pem',
'/usr/local/share/certs/ca-root-nss.crt',
'/etc/ssl/cert.pem'
]
# First try SSL_CERT_FILE
if 'SSL_CERT_FILE' in os.environ:
paths.insert(0, os.environ['SSL_CERT_FILE'])
| python | {
"resource": ""
} |
q257621 | extract_from_system | validation | def extract_from_system(cert_callback=None, callback_only_on_failure=False):
"""
Extracts trusted CA certs from the system CA cert bundle
:param cert_callback:
A callback that is called once for each certificate in the trust store.
It should accept two parameters: an asn1crypto.x509.Certificate object,
and a reason. The reason will be None if the certificate is being
exported, otherwise it will be a unicode string of the reason it won't.
:param callback_only_on_failure:
A boolean - if the callback should only be called when a certificate is
not exported.
:return:
A list of 3-element tuples:
- 0: a byte string of a DER-encoded certificate
- 1: a set of unicode strings that are OIDs of purposes to trust the
certificate for
- 2: a set of unicode strings that are OIDs of purposes to reject the
certificate for
"""
all_purposes = '2.5.29.37.0'
ca_path = system_path()
output = []
with open(ca_path, 'rb') as f:
for armor_type, _, cert_bytes in | python | {
"resource": ""
} |
q257622 | _convert_filetime_to_timestamp | validation | def _convert_filetime_to_timestamp(filetime):
"""
Windows returns times as 64-bit unsigned longs that are the number
of hundreds of nanoseconds since Jan 1 1601. This converts it to
a datetime object.
:param filetime:
A FILETIME struct object
| python | {
"resource": ""
} |
q257623 | extract_chain | validation | def extract_chain(server_handshake_bytes):
"""
Extracts the X.509 certificates from the server handshake bytes for use
when debugging
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
A list of asn1crypto.x509.Certificate objects
"""
output = []
chain_bytes = None
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
if record_type != b'\x16':
| python | {
"resource": ""
} |
q257624 | detect_client_auth_request | validation | def detect_client_auth_request(server_handshake_bytes):
"""
Determines if a CertificateRequest message is sent from the server asking
the client for a certificate
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
A boolean - if a client certificate request was found
"""
| python | {
"resource": ""
} |
q257625 | get_dh_params_length | validation | def get_dh_params_length(server_handshake_bytes):
"""
Determines the length of the DH params from the ServerKeyExchange
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
None or an integer of the bit size of the DH parameters
"""
output = None
dh_params_bytes = None
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
if record_type != b'\x16':
| python | {
"resource": ""
} |
q257626 | parse_alert | validation | def parse_alert(server_handshake_bytes):
"""
Parses the handshake for protocol alerts
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
        None or a 2-element tuple of integers:
0: 1 (warning) or 2 (fatal)
1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2)
"""
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
| python | {
"resource": ""
} |
q257627 | parse_session_info | validation | def parse_session_info(server_handshake_bytes, client_handshake_bytes):
"""
Parse the TLS handshake from the client to the server to extract information
including the cipher suite selected, if compression is enabled, the
session id and if a new or reused session ticket exists.
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:param client_handshake_bytes:
A byte string of the handshake data sent to the server
:return:
A dict with the following keys:
- "protocol": unicode string
- "cipher_suite": unicode string
- "compression": boolean
- "session_id": "new", "reused" or None
- "session_ticket: "new", "reused" or None
"""
protocol = None
cipher_suite = None
compression = False
session_id = None
session_ticket = None
server_session_id = None
client_session_id = None
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
if record_type != b'\x16':
continue
for message_type, message_data in parse_handshake_messages(record_data):
# Ensure we are working with a ServerHello message
if message_type != b'\x02':
continue
protocol = {
b'\x03\x00': "SSLv3",
b'\x03\x01': "TLSv1",
b'\x03\x02': "TLSv1.1",
b'\x03\x03': "TLSv1.2",
b'\x03\x04': "TLSv1.3",
}[message_data[0:2]]
session_id_length = int_from_bytes(message_data[34:35])
if session_id_length > 0:
server_session_id = message_data[35:35 + session_id_length]
cipher_suite_start = 35 + session_id_length
cipher_suite_bytes = message_data[cipher_suite_start:cipher_suite_start + 2]
cipher_suite = CIPHER_SUITE_MAP[cipher_suite_bytes]
compression_start = cipher_suite_start + 2
compression = message_data[compression_start:compression_start + 1] != b'\x00'
extensions_length_start = compression_start + 1
extensions_data = message_data[extensions_length_start:]
for extension_type, extension_data in _parse_hello_extensions(extensions_data):
if extension_type == 35:
session_ticket = "new"
break
break
for record_type, _, record_data in parse_tls_records(client_handshake_bytes):
if record_type != b'\x16':
continue
for message_type, message_data in parse_handshake_messages(record_data):
# Ensure we are working with a ClientHello message
if message_type != b'\x01':
| python | {
"resource": ""
} |
q257628 | parse_tls_records | validation | def parse_tls_records(data):
"""
Creates a generator returning tuples of information about each record
in a byte string of data from a TLS client or server. Stops as soon as it
    finds a ChangeCipherSpec message since all data from then on is encrypted.
:param data:
A byte string of TLS records
:return:
A generator that yields 3-element tuples:
[0] Byte string of record type
[1] Byte string of protocol version
[2] Byte string of record data
"""
pointer = 0
data_len = len(data)
while pointer < data_len:
        # | python | {
"resource": ""
} |
q257629 | parse_handshake_messages | validation | def parse_handshake_messages(data):
"""
Creates a generator returning tuples of information about each message in
a byte string of data from a TLS handshake record
:param data:
A byte string of a TLS handshake record data
:return:
A generator that yields 2-element tuples: | python | {
"resource": ""
} |
q257630 | _parse_hello_extensions | validation | def _parse_hello_extensions(data):
"""
Creates a generator returning tuples of information about each extension
    from a byte string of extension data contained in a ServerHello or
ClientHello message
:param data:
A byte string of a extension data from a TLS ServerHello or ClientHello
message
:return:
A generator that yields 2-element tuples:
[0] Byte string of extension type
[1] Byte string of extension data
"""
if data == b'':
return
extentions_length = int_from_bytes(data[0:2])
extensions_start = 2
extensions_end = 2 + extentions_length
| python | {
"resource": ""
} |
q257631 | raise_hostname | validation | def raise_hostname(certificate, hostname):
"""
Raises a TLSVerificationError due to a hostname mismatch
:param certificate:
An asn1crypto.x509.Certificate object
:raises:
TLSVerificationError
"""
is_ip = re.match('^\\d+\\.\\d+\\.\\d+\\.\\d+$', hostname) or hostname.find(':') != -1
if is_ip:
hostname_type = 'IP address %s' % hostname
else:
hostname_type = 'domain name %s' % hostname
message = 'Server certificate verification failed - %s | python | {
"resource": ""
} |
q257632 | raise_expired_not_yet_valid | validation | def raise_expired_not_yet_valid(certificate):
"""
Raises a TLSVerificationError due to certificate being expired, or not yet
being valid
:param certificate:
An asn1crypto.x509.Certificate object
:raises:
TLSVerificationError
"""
validity = certificate['tbs_certificate']['validity']
not_after = validity['not_after'].native
not_before = validity['not_before'].native
now = datetime.now(timezone.utc)
if not_before > now:
formatted_before = not_before.strftime('%Y-%m-%d %H:%M:%SZ') | python | {
"resource": ""
} |
q257633 | detect_other_protocol | validation | def detect_other_protocol(server_handshake_bytes):
"""
Looks at the server handshake bytes to try and detect a different protocol
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
None, or a unicode string of "ftp", "http", "imap", "pop3", "smtp"
"""
if server_handshake_bytes[0:5] == b'HTTP/':
return 'HTTP'
if server_handshake_bytes[0:4] == b'220 ':
if re.match(b'^[^\r\n]*ftp', server_handshake_bytes, re.I):
return 'FTP'
else:
return 'SMTP'
| python | {
"resource": ""
} |
q257634 | _try_decode | validation | def _try_decode(byte_string):
"""
Tries decoding a byte string from the OS into a unicode string
:param byte_string:
A byte string
:return:
A unicode string
"""
try:
return str_cls(byte_string, _encoding)
# If the "correct" encoding did not work, try some defaults, and then just
    # obliterate characters that we can't seem to decode properly
except (UnicodeDecodeError):
| python | {
"resource": ""
} |
q257635 | _read_callback | validation | def _read_callback(connection_id, data_buffer, data_length_pointer):
"""
Callback called by Secure Transport to actually read the socket
:param connection_id:
        An integer identifying the connection
:param data_buffer:
A char pointer FFI type to write the data to
:param data_length_pointer:
A size_t pointer FFI type of the amount of data to read. Will be
overwritten with the amount of data read on return.
:return:
An integer status code of the result - 0 for success
"""
self = None
try:
self = _connection_refs.get(connection_id)
if not self:
socket = _socket_refs.get(connection_id)
else:
socket = self._socket
if not self and not socket:
return 0
bytes_requested = deref(data_length_pointer)
timeout = socket.gettimeout()
error = None
data = b''
try:
while len(data) < bytes_requested:
# Python 2 on Travis CI seems to have issues with blocking on
# recv() for longer than the socket timeout value, so we select
if timeout is not None and timeout > 0.0:
read_ready, _, _ = select.select([socket], [], [], timeout)
if len(read_ready) == 0:
raise socket_.error(errno.EAGAIN, 'timed out')
chunk = socket.recv(bytes_requested - len(data))
data += chunk
if chunk == b'':
if len(data) == 0:
if timeout is None:
return SecurityConst.errSSLClosedNoNotify
return SecurityConst.errSSLClosedAbort
break
except (socket_.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedNoNotify
return SecurityConst.errSSLClosedAbort
| python | {
"resource": ""
} |
q257636 | _read_remaining | validation | def _read_remaining(socket):
"""
Reads everything available from the socket - used for debugging when there
is a protocol error
:param socket:
The socket to read from
:return:
A byte string of the remaining data
"""
output = b''
old_timeout = socket.gettimeout()
| python | {
"resource": ""
} |
q257637 | _write_callback | validation | def _write_callback(connection_id, data_buffer, data_length_pointer):
"""
Callback called by Secure Transport to actually write to the socket
:param connection_id:
        An integer identifying the connection
:param data_buffer:
A char pointer FFI type containing the data to write
:param data_length_pointer:
A size_t pointer FFI type of the amount of data to write. Will be
overwritten with the amount of data actually written on return.
:return:
An integer status code of the result - 0 for success
"""
try:
self = _connection_refs.get(connection_id)
if not self:
socket = _socket_refs.get(connection_id)
else:
socket = self._socket
if not self and not socket:
return 0
data_length = deref(data_length_pointer)
data = bytes_from_buffer(data_buffer, data_length)
if self and not self._done_handshake:
self._client_hello += data
error = None
try:
| python | {
"resource": ""
} |
q257638 | get_path | validation | def get_path(temp_dir=None, cache_length=24, cert_callback=None):
"""
Get the filesystem path to a file that contains OpenSSL-compatible CA certs.
    On OS X and Windows, they are extracted from the system certificate store
and cached in a file on the filesystem. This path should not be writable
by other users, otherwise they could inject CA certs into the trust list.
:param temp_dir:
The temporary directory to cache the CA certs in on OS X and Windows.
Needs to have secure permissions so other users can not modify the
contents.
:param cache_length:
The number of hours to cache the CA certs on OS X and Windows
:param cert_callback:
A callback that is called once for each certificate in the trust store.
It should accept two parameters: an asn1crypto.x509.Certificate object,
and a reason. The reason will be None if the certificate is being
exported, otherwise it will be a unicode string of the reason it won't.
This is only called on Windows and OS X when passed to this function.
:raises:
oscrypto.errors.CACertsError - when an error occurs exporting/locating certs
:return:
The full filesystem path to a CA certs file
"""
ca_path, temp = _ca_path(temp_dir)
# Windows and OS X
if temp and _cached_path_needs_update(ca_path, cache_length):
empty_set = set()
any_purpose = '2.5.29.37.0'
apple_ssl = '1.2.840.113635.100.1.3'
win_server_auth = '1.3.6.1.5.5.7.3.1'
with path_lock:
if _cached_path_needs_update(ca_path, cache_length):
with open(ca_path, 'wb') as f:
for cert, trust_oids, reject_oids in extract_from_system(cert_callback, True):
if sys.platform == 'darwin':
if trust_oids != empty_set and any_purpose not in trust_oids \
and apple_ssl not in trust_oids:
if cert_callback:
cert_callback(Certificate.load(cert), 'implicitly distrusted for TLS')
| python | {
"resource": ""
} |
q257639 | _map_oids | validation | def _map_oids(oids):
"""
Takes a set of unicode string OIDs and converts vendor-specific OIDs into
generics OIDs from RFCs.
- 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.1 (server_auth)
- 1.2.840.113635.100.1.3 (apple_ssl) -> 1.3.6.1.5.5.7.3.2 (client_auth)
- 1.2.840.113635.100.1.8 (apple_smime) -> 1.3.6.1.5.5.7.3.4 (email_protection)
- 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.13 (eap_over_ppp)
- 1.2.840.113635.100.1.9 (apple_eap) -> 1.3.6.1.5.5.7.3.14 (eap_over_lan)
- 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.5 (ipsec_end_system)
- 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.6 (ipsec_tunnel)
- 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.7 (ipsec_user)
- 1.2.840.113635.100.1.11 (apple_ipsec) -> 1.3.6.1.5.5.7.3.17 | python | {
"resource": ""
} |
q257640 | _cached_path_needs_update | validation | def _cached_path_needs_update(ca_path, cache_length):
"""
Checks to see if a cache file needs to be refreshed
:param ca_path:
A unicode string of the path to the cache file
:param cache_length:
An integer representing the number of hours the cache is valid | python | {
"resource": ""
} |
q257641 | Service.version | validation | def version(self):
""" Create a new version under this service. """
ver = Version()
ver.conn = self.conn
ver.attrs = {
# Parent params
| python | {
"resource": ""
} |
q257642 | Version.vcl | validation | def vcl(self, name, content):
""" Create a new VCL under this version. """
vcl = VCL()
vcl.conn = self.conn
vcl.attrs = {
# Parent params
'service_id': self.attrs['service_id'],
'version': self.attrs['number'],
| python | {
"resource": ""
} |
q257643 | BaseColumn.to_dict | validation | def to_dict(self):
"""
Converts the column to a dictionary representation accepted
by the Citrination server.
:return: Dictionary with basic options, plus any column type specific
options held under the "options" key
:rtype: dict
"""
return {
"type": self.type,
"name": self.name,
| python | {
"resource": ""
} |
q257644 | DataViewBuilder.add_descriptor | validation | def add_descriptor(self, descriptor, role='ignore', group_by_key=False):
"""
Add a descriptor column.
:param descriptor: A Descriptor instance (e.g., RealDescriptor, InorganicDescriptor, etc.)
:param role: Specify a role (input, output, latentVariable, or ignore)
:param group_by_key: Whether or not to group by this key during cross validation
"""
descriptor.validate()
| python | {
"resource": ""
} |
q257645 | BaseClient._patch | validation | def _patch(self, route, data, headers=None, failure_message=None):
"""
Execute a patch request and return the result
"""
headers = self._get_headers(headers)
response_lambda = (
lambda: requests.patch(
self._get_qualified_route(route), headers=headers, data=data, verify=False, | python | {
"resource": ""
} |
q257646 | SearchClient._validate_search_query | validation | def _validate_search_query(self, returning_query):
"""
Checks to see that the query will not exceed the max query depth
:param returning_query: The PIF system or Dataset query to execute.
:type returning_query: :class:`PifSystemReturningQuery` or :class: `DatasetReturningQuery`
"""
start_index = returning_query.from_index or 0
size = returning_query.size or 0
if start_index < 0:
raise CitrinationClientError(
"start_index cannot be negative. Please enter a value greater than or equal to zero")
if size < 0:
| python | {
"resource": ""
} |
q257647 | SearchClient.dataset_search | validation | def dataset_search(self, dataset_returning_query):
"""
Run a dataset query against Citrination.
:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.
:type dataset_returning_query: :class:`DatasetReturningQuery`
:return: Dataset search result object with the results of the query.
:rtype: :class:`DatasetSearchResult`
| python | {
"resource": ""
} |
q257648 | SearchClient.pif_multi_search | validation | def pif_multi_search(self, multi_query):
"""
Run each in a list of PIF queries against Citrination.
:param multi_query: :class:`MultiQuery` object to execute.
:return: :class:`PifMultiSearchResult` object with the results of the query.
"""
failure_message = "Error while making PIF multi search request"
| python | {
"resource": ""
} |
q257649 | check_for_rate_limiting | validation | def check_for_rate_limiting(response, response_lambda, timeout=1, attempts=0):
"""
Takes an initial response, and a way to repeat the request that produced it and retries the request with an increasing sleep period between requests if rate limiting resposne codes are encountered.
If more than 3 attempts are made, a RateLimitingException is raised
:param response: A response from Citrination
:type response: requests.Response
:param response_lambda: a callable that runs the request that returned the
response
:type response_lambda: function
:param timeout: the time to wait before retrying
:type timeout: int
:param attempts: the number of the retry being executed
:type attempts: int
| python | {
"resource": ""
} |
q257650 | DataViewsClient.create | validation | def create(self, configuration, name, description):
"""
Creates a data view from the search template and ml template given
:param configuration: Information to construct the data view from (eg descriptors, datasets etc)
:param name: Name of the data view
:param description: Description for the data view
| python | {
"resource": ""
} |
q257651 | DataViewsClient.update | validation | def update(self, id, configuration, name, description):
"""
Updates an existing data view from the search template and ml template given
:param id: Identifier for the data view. This returned from the create method.
:param configuration: Information to construct the data view from (eg descriptors, datasets etc)
:param name: Name of the data view
:param description: Description for the data view
"""
data = {
"configuration":
| python | {
"resource": ""
} |
q257652 | DataViewsClient.get | validation | def get(self, data_view_id):
"""
Gets basic information about a view
:param data_view_id: Identifier of the data view
:return: Metadata about the view as JSON
"""
failure_message = "Dataview get failed"
| python | {
"resource": ""
} |
q257653 | DataViewsClient.create_ml_configuration_from_datasets | validation | def create_ml_configuration_from_datasets(self, dataset_ids):
"""
Creates an ml configuration from dataset_ids and extract_as_keys
:param dataset_ids: Array of dataset identifiers to make search template from
:return: An identifier used to request the status of the builder job (get_ml_configuration_status)
"""
available_columns = self.search_template_client.get_available_columns(dataset_ids)
| python | {
"resource": ""
} |
q257654 | DataViewsClient.create_ml_configuration | validation | def create_ml_configuration(self, search_template, extract_as_keys, dataset_ids):
"""
This method will spawn a server job to create a default ML configuration based on a search template and
the extract as keys.
This function will submit the request to build, and wait for the configuration to finish before returning.
:param search_template: A search template defining the query (properties, datasets etc)
:param extract_as_keys: Array of extract-as keys defining the descriptors
:param dataset_ids: Array of dataset identifiers to make search template from
:return: An identifier used to request the status of the builder job (get_ml_configuration_status)
"""
data = {
"search_template":
search_template,
"extract_as_keys":
extract_as_keys
}
failure_message = "ML Configuration creation failed"
config_job_id = self._get_success_json(self._post_json(
| python | {
"resource": ""
} |
q257655 | DataViewsClient.__convert_response_to_configuration | validation | def __convert_response_to_configuration(self, result_blob, dataset_ids):
"""
Utility function to turn the result object from the configuration builder endpoint into something that
can be used directly as a configuration.
:param result_blob: Nested dicts representing the possible descriptors
:param dataset_ids: Array of dataset identifiers to make search template from
:return: An object suitable to be used as a parameter to data view create
"""
builder = DataViewBuilder()
builder.dataset_ids(dataset_ids)
for i, (k, v) in enumerate(result_blob['descriptors'].items()):
try:
| python | {
"resource": ""
} |
q257656 | DataViewsClient.__get_ml_configuration_status | validation | def __get_ml_configuration_status(self, job_id):
"""
After invoking the create_ml_configuration async method, you can use this method to
check on the status of the builder job.
:param job_id: The identifier returned from create_ml_configuration
:return: Job status
"""
failure_message = "Get status | python | {
"resource": ""
} |
q257657 | ModelsClient.tsne | validation | def tsne(self, data_view_id):
"""
Get the t-SNE projection, including responses and tags.
:param data_view_id: The ID of the data view to retrieve TSNE from
:type data_view_id: int
:return: The TSNE analysis
:rtype: :class:`Tsne`
"""
analysis = self._data_analysis(data_view_id)
projections = analysis['projections']
tsne = Tsne()
for k, v in projections.items():
projection = Projection(
| python | {
"resource": ""
} |
q257658 | ModelsClient._data_analysis | validation | def _data_analysis(self, data_view_id):
"""
Data analysis endpoint.
:param data_view_id: The model identifier (id number for data views)
:type data_view_id: str
:return: dictionary containing | python | {
"resource": ""
} |
q257659 | ModelsClient.submit_predict_request | validation | def submit_predict_request(self, data_view_id, candidates, prediction_source='scalar', use_prior=True):
"""
Submits an async prediction request.
:param data_view_id: The id returned from create
:param candidates: Array of candidates
:param prediction_source: 'scalar' or 'scalar_from_distribution'
:param use_prior: True to use prior prediction, otherwise False
| python | {
"resource": ""
} |
q257660 | ModelsClient.check_predict_status | validation | def check_predict_status(self, view_id, predict_request_id):
"""
Returns a string indicating the status of the prediction job
:param view_id: The data view id returned from data view create
:param predict_request_id: The id returned from predict
:return: Status data, also includes results if state is finished
"""
failure_message = "Get status on predict failed"
| python | {
"resource": ""
} |
q257661 | ModelsClient.submit_design_run | validation | def submit_design_run(self, data_view_id, num_candidates, effort, target=None, constraints=[], sampler="Default"):
"""
Submits a new experimental design run.
:param data_view_id: The ID number of the data view to which the
run belongs, as a string
:type data_view_id: str
:param num_candidates: The number of candidates to return
:type num_candidates: int
:param target: An :class:``Target`` instance representing
the design run optimization target
:type target: :class:``Target``
:param constraints: An array of design constraints (instances of
objects which extend :class:``BaseConstraint``)
:type constraints: list of :class:``BaseConstraint``
:param sampler: The name of the sampler to use during the design run:
either "Default" or "This view"
:type sampler: str
:return: A :class:`DesignRun` instance containing the UID of the
new run
"""
if effort > 30:
raise CitrinationClientError("Parameter effort must be less than 30 to trigger a design run")
| python | {
"resource": ""
} |
q257662 | ModelsClient.get_design_run_status | validation | def get_design_run_status(self, data_view_id, run_uuid):
"""
Retrieves the status of an in progress or completed design run
:param data_view_id: The ID number of the data view to which the
run belongs, as a string
:type data_view_id: str
:param run_uuid: The UUID of the design run to retrieve | python | {
"resource": ""
} |
q257663 | ModelsClient.get_design_run_results | validation | def get_design_run_results(self, data_view_id, run_uuid):
"""
Retrieves the results of an existing designrun
:param data_view_id: The ID number of the data view to which the
run belongs, as a string
:type data_view_id: str
:param run_uuid: The UUID of the design run to | python | {
"resource": ""
} |
q257664 | ModelsClient.get_data_view | validation | def get_data_view(self, data_view_id):
"""
Retrieves a summary of information for a given data view
- view id
- name
- description
- columns
:param data_view_id: The ID number of the data view to which the
run belongs, as a string
:type data_view_id: str
"""
url = routes.get_data_view(data_view_id)
response = self._get(url).json()
result = response["data"]["data_view"]
datasets_list = []
for dataset in result["datasets"]:
datasets_list.append(Dataset(
name=dataset["name"],
| python | {
"resource": ""
} |
q257665 | ModelsClient.kill_design_run | validation | def kill_design_run(self, data_view_id, run_uuid):
"""
Kills an in progress experimental design run
:param data_view_id: The ID number of the data view to which the
run belongs, as a string
:type data_view_id: str
:param run_uuid: The UUID of the design run to kill
| python | {
"resource": ""
} |
q257666 | load_file_as_yaml | validation | def load_file_as_yaml(path):
"""
Given a filepath, loads the file as a dictionary from YAML
:param path: The path to a YAML file
"""
with open(path, | python | {
"resource": ""
} |
q257667 | get_credentials_from_file | validation | def get_credentials_from_file(filepath):
"""
Extracts credentials from the yaml formatted credential filepath
passed in. Uses the default profile if the CITRINATION_PROFILE env var
is not set, otherwise looks for a profile with that name in the credentials file.
:param filepath: The path of the credentials file
"""
| python | {
"resource": ""
} |
q257668 | get_preferred_credentials | validation | def get_preferred_credentials(api_key, site, cred_file=DEFAULT_CITRINATION_CREDENTIALS_FILE):
"""
Given an API key, a site url and a credentials file path, runs through a prioritized list of credential sources to find credentials.
Specifically, this method ranks credential priority as follows:
1. Those passed in as the first two parameters to this method
2. Those found in the environment as variables
3. Those found in the credentials file at the profile specified
by the profile environment variable
4. Those found in the default stanza in the credentials file
:param api_key: A Citrination API Key or None
:param site: A Citrination site URL or None
:param cred_file: The path to a credentials file
| python | {
"resource": ""
} |
q257669 | DataClient.list_files | validation | def list_files(self, dataset_id, glob=".", is_dir=False):
"""
List matched filenames in a dataset on Citrination.
:param dataset_id: The ID of the dataset to search for files.
:type dataset_id: int
:param glob: A pattern which will be matched against files in the dataset.
:type glob: str
:param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.
:type is_dir: bool
:return: A list of filepaths in the dataset matching the provided glob.
:rtype: list of strings
"""
| python | {
"resource": ""
} |
q257670 | DataClient.matched_file_count | validation | def matched_file_count(self, dataset_id, glob=".", is_dir=False):
"""
Returns the number of files matching a pattern in a dataset.
:param dataset_id: The ID of the dataset to search for files.
:type dataset_id: int
:param glob: A pattern which will be matched against files in the dataset.
:type glob: str
:param is_dir: A boolean indicating whether or not | python | {
"resource": ""
} |
q257671 | DataClient.get_dataset_files | validation | def get_dataset_files(self, dataset_id, glob=".", is_dir=False, version_number=None):
"""
Retrieves URLs for the files matched by a glob or a path to a directory
in a given dataset.
:param dataset_id: The id of the dataset to retrieve files from
:type dataset_id: int
:param glob: A regex used to select one or more files in the dataset
:type glob: str
:param is_dir: Whether or not the supplied pattern should be treated as a directory to search in
:type is_dir: bool
:param version_number: The version number of the dataset to retrieve files from
:type version_number: int
:return: A list of dataset files whose paths match the provided pattern.
:rtype: list of :class:`DatasetFile`
"""
if version_number is None:
latest = True
else:
latest = False
data = {
"download_request": {
"glob": glob,
"isDir": is_dir,
| python | {
"resource": ""
} |
q257672 | DataClient.get_dataset_file | validation | def get_dataset_file(self, dataset_id, file_path, version = None):
"""
Retrieves a dataset file matching a provided file path
:param dataset_id: The id of the dataset to retrieve file from
:type dataset_id: int
:param file_path: The file path within the dataset
:type file_path: str
:param version: The dataset version to look for the file in. If nothing is supplied, | python | {
"resource": ""
} |
q257673 | DataClient.get_pif | validation | def get_pif(self, dataset_id, uid, dataset_version = None):
"""
Retrieves a PIF from a given dataset.
:param dataset_id: The id of the dataset to retrieve PIF from
:type dataset_id: int
:param uid: The uid of the PIF to retrieve
:type uid: str
:param dataset_version: The dataset version to look for the PIF in. If nothing is supplied, the latest dataset version will be searched
:type dataset_version: int
:return: A :class:`Pif` object
:rtype: :class:`Pif`
"""
failure_message = "An error occurred retrieving PIF {}".format(uid)
| python | {
"resource": ""
} |
q257674 | DataClient.create_dataset | validation | def create_dataset(self, name=None, description=None, public=False):
"""
Create a new data set.
:param name: name of the dataset
:type name: str
:param description: description for the dataset
:type description: str
:param public: A boolean indicating whether or not the dataset should be public.
:type public: bool
:return: The newly created dataset.
:rtype: :class:`Dataset`
"""
data = {
"public": _convert_bool_to_public_value(public)
}
| python | {
"resource": ""
} |
q257675 | DataClient.update_dataset | validation | def update_dataset(self, dataset_id, name=None, description=None, public=None):
"""
Update a data set.
:param dataset_id: The ID of the dataset to update
:type dataset_id: int
:param name: name of the dataset
:type name: str
:param description: description for the dataset
:type description: str
:param public: A boolean indicating whether or not the dataset should
| python | {
"resource": ""
} |
q257676 | DataClient.create_dataset_version | validation | def create_dataset_version(self, dataset_id):
"""
Create a new data set version.
:param dataset_id: The ID of the dataset for which the version must be bumped.
:type dataset_id: int
:return: The new dataset version.
:rtype: :class:`DatasetVersion`
"""
failure_message = "Failed to create dataset version for | python | {
"resource": ""
} |
q257677 | SearchTemplateClient.get_available_columns | validation | def get_available_columns(self, dataset_ids):
"""
Retrieves the set of columns from the combination of dataset ids given
:param dataset_ids: The id of the dataset to retrieve columns from
:type dataset_ids: list of int
:return: A list of column names from the dataset ids given.
:rtype: list of str
"""
if not isinstance(dataset_ids, list):
dataset_ids = [dataset_ids]
data = {
| python | {
"resource": ""
} |
q257678 | SearchTemplateClient.__generate_search_template | validation | def __generate_search_template(self, dataset_ids):
"""
Generates a default search templates from the available columns in the dataset ids given.
:param dataset_ids: The id of the dataset to retrieve files from
:type dataset_ids: list of int
:return: A search template based on the columns in the datasets given
"""
data = {
"dataset_ids":
dataset_ids
}
| python | {
"resource": ""
} |
q257679 | SearchTemplateClient.__prune_search_template | validation | def __prune_search_template(self, extract_as_keys, search_template):
"""
Returns a new search template, but the new template has only the extract_as_keys given.
:param extract_as_keys: List of extract as keys to keep
:param search_template: The search template to prune
:return: New search template with pruned columns
"""
data = {
"extract_as_keys":
extract_as_keys,
| python | {
"resource": ""
} |
q257680 | QueryEncoder.default | validation | def default(self, obj):
"""
Convert an object to a form ready to dump to json.
:param obj: Object being serialized. The type of this object must be one of the following: None; a single object derived from the Pio class; or a list of objects, each derived from the Pio class. | python | {
"resource": ""
} |
q257681 | QueryEncoder._keys_to_camel_case | validation | def _keys_to_camel_case(self, obj):
"""
Make a copy of a dictionary with all keys converted to camel case. This is just calls to_camel_case on each of the keys in the dictionary and | python | {
"resource": ""
} |
q257682 | ModelTemplateClient.validate | validation | def validate(self, ml_template):
"""
Runs the template against the validation endpoint, returns a message indicating status of the templte
:param ml_template: Template to validate
:return: OK or error message if validation failed
"""
data = {
"ml_template":
ml_template
}
failure_message = "ML template | python | {
"resource": ""
} |
q257683 | add_organization_course | validation | def add_organization_course(organization_data, course_key):
"""
Adds a organization-course link to the system
"""
_validate_course_key(course_key)
_validate_organization_data(organization_data)
| python | {
"resource": ""
} |
q257684 | remove_organization_course | validation | def remove_organization_course(organization, course_key):
"""
Removes the specfied course from the specified organization
"""
| python | {
"resource": ""
} |
q257685 | course_key_is_valid | validation | def course_key_is_valid(course_key):
"""
Course key object validation
"""
if course_key is None:
return False
try:
| python | {
"resource": ""
} |
q257686 | organization_data_is_valid | validation | def organization_data_is_valid(organization_data):
"""
Organization data validation
"""
if organization_data is None:
return False
if 'id' in organization_data and not organization_data.get('id'):
| python | {
"resource": ""
} |
q257687 | _inactivate_organization | validation | def _inactivate_organization(organization):
"""
Inactivates an activated organization as well as any active relationships
"""
[_inactivate_organization_course_relationship(record) for record
in internal.OrganizationCourse.objects.filter(organization_id=organization.id, | python | {
"resource": ""
} |
q257688 | _activate_organization_course_relationship | validation | def _activate_organization_course_relationship(relationship): # pylint: disable=invalid-name
"""
Activates an inactive organization-course relationship
"""
# If the relationship doesn't exist or the organization isn't active we'll want to raise an error
relationship | python | {
"resource": ""
} |
q257689 | _inactivate_organization_course_relationship | validation | def _inactivate_organization_course_relationship(relationship): # pylint: disable=invalid-name
"""
Inactivates an active organization-course relationship
"""
| python | {
"resource": ""
} |
q257690 | fetch_organization_courses | validation | def fetch_organization_courses(organization):
"""
Retrieves the set of courses currently linked to the specified organization
"""
organization_obj = serializers.deserialize_organization(organization)
queryset = internal.OrganizationCourse.objects.filter(
organization=organization_obj,
| python | {
"resource": ""
} |
q257691 | fetch_course_organizations | validation | def fetch_course_organizations(course_key):
"""
Retrieves the organizations linked to the specified course
"""
queryset = internal.OrganizationCourse.objects.filter(
course_id=text_type(course_key),
active=True | python | {
"resource": ""
} |
q257692 | serialize_organization | validation | def serialize_organization(organization):
"""
Organization object-to-dict serialization
"""
return {
'id': organization.id,
'name': organization.name,
| python | {
"resource": ""
} |
q257693 | deserialize_organization | validation | def deserialize_organization(organization_dict):
"""
Organization dict-to-object serialization
"""
return models.Organization(
id=organization_dict.get('id'),
name=organization_dict.get('name', ''),
| python | {
"resource": ""
} |
q257694 | ImageExtractor.is_valid_filename | validation | def is_valid_filename(self, image_node):
"""\
will check the image src against a list
of bad image files we know of like buttons, etc...
"""
src = self.parser.getAttribute(image_node, attr='src')
if not src:
| python | {
"resource": ""
} |
q257695 | ImageExtractor.get_images_bytesize_match | validation | def get_images_bytesize_match(self, images):
"""\
loop through all the images and find the ones
that have the best bytez to even make them a candidate
"""
cnt = 0
max_bytes_size = 15728640
good_images = []
for image in images:
if cnt > 30:
return good_images
src = self.parser.getAttribute(image, attr='src')
src = self.build_image_path(src)
src = self.add_schema_if_none(src)
local_image = self.get_local_image(src)
if local_image:
| python | {
"resource": ""
} |
q257696 | ImageExtractor.check_link_tag | validation | def check_link_tag(self):
"""\
checks to see if we were able to
find open link_src on this page
"""
node = self.article.raw_doc
meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src')
for item in meta:
| python | {
"resource": ""
} |
q257697 | ImageExtractor.get_local_image | validation | def get_local_image(self, src):
"""\
returns the bytes of the image file on disk
"""
| python | {
"resource": ""
} |
q257698 | VideoExtractor.get_video | validation | def get_video(self, node):
"""
Create a video object from a video embed
"""
video = Video()
video._embed_code = self.get_embed_code(node)
| python | {
"resource": ""
} |
q257699 | ImageUtils.store_image | validation | def store_image(cls, http_client, link_hash, src, config):
"""\
Writes an image src http string to disk as a temporary file
and returns the LocallyStoredImage object
that has the info you should need on the image
"""
# check for a cache hit already on disk
image = cls.read_localfile(link_hash, src, config)
if image:
return image
# no cache found; do something else
# parse base64 image
if src.startswith('data:image'):
| python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.