Dataset schema (viewer summary): _id — string, 2 to 7 chars; title — string, 1 to 88 chars; partition — string, 3 classes; text — string, 75 to 19.8k chars; language — string, 1 class; meta_information — dict.
q269800
X509Name.der
test
def der(self):
    """
    Return the DER encoding of this name.

    :return: The DER encoded form of this name.
    :rtype: :py:class:`bytes`
    """
    result_buffer = _ffi.new('unsigned char**')
    encode_result = _lib.i2d_X509_NAME(self._name, result_buffer)
    _openssl_assert(encode_result >= 0)

    string_result = _ffi.buffer(result_buffer[0], encode_result)[:]
    _lib.OPENSSL_free(result_buffer[0])
    return string_result
python
{ "resource": "" }
q269801
X509Name.get_components
test
def get_components(self):
    """
    Returns the components of this name, as a sequence of 2-tuples.

    :return: The components of this name.
    :rtype: :py:class:`list` of ``name, value`` tuples.
    """
    result = []
    for i in range(_lib.X509_NAME_entry_count(self._name)):
        ent = _lib.X509_NAME_get_entry(self._name, i)

        fname = _lib.X509_NAME_ENTRY_get_object(ent)
        fval = _lib.X509_NAME_ENTRY_get_data(ent)

        nid = _lib.OBJ_obj2nid(fname)
        name = _lib.OBJ_nid2sn(nid)

        # ffi.string does not handle strings containing NULL bytes
        # (which may have been generated by old, broken software)
        value = _ffi.buffer(
            _lib.ASN1_STRING_data(fval), _lib.ASN1_STRING_length(fval)
        )[:]
        result.append((_ffi.string(name), value))

    return result
python
{ "resource": "" }
q269802
X509Extension.get_short_name
test
def get_short_name(self):
    """
    Returns the short type name of this X.509 extension.

    The result is a byte string such as :py:const:`b"basicConstraints"`.

    :return: The short type name.
    :rtype: :py:data:`bytes`

    .. versionadded:: 0.12
    """
    obj = _lib.X509_EXTENSION_get_object(self._extension)
    nid = _lib.OBJ_obj2nid(obj)
    return _ffi.string(_lib.OBJ_nid2sn(nid))
python
{ "resource": "" }
q269803
X509Extension.get_data
test
def get_data(self):
    """
    Returns the data of the X509 extension, encoded as ASN.1.

    :return: The ASN.1 encoded data of this X509 extension.
    :rtype: :py:data:`bytes`

    .. versionadded:: 0.12
    """
    octet_result = _lib.X509_EXTENSION_get_data(self._extension)
    string_result = _ffi.cast('ASN1_STRING*', octet_result)
    char_result = _lib.ASN1_STRING_data(string_result)
    result_length = _lib.ASN1_STRING_length(string_result)
    return _ffi.buffer(char_result, result_length)[:]
python
{ "resource": "" }
q269804
X509Req.to_cryptography
test
def to_cryptography(self):
    """
    Export as a ``cryptography`` certificate signing request.

    :rtype: ``cryptography.x509.CertificateSigningRequest``

    .. versionadded:: 17.1.0
    """
    from cryptography.hazmat.backends.openssl.x509 import (
        _CertificateSigningRequest
    )
    backend = _get_backend()
    return _CertificateSigningRequest(backend, self._req)
python
{ "resource": "" }
q269805
X509Req.set_pubkey
test
def set_pubkey(self, pkey):
    """
    Set the public key of the certificate signing request.

    :param pkey: The public key to use.
    :type pkey: :py:class:`PKey`

    :return: ``None``
    """
    set_result = _lib.X509_REQ_set_pubkey(self._req, pkey._pkey)
    _openssl_assert(set_result == 1)
python
{ "resource": "" }
q269806
X509Req.get_pubkey
test
def get_pubkey(self):
    """
    Get the public key of the certificate signing request.

    :return: The public key.
    :rtype: :py:class:`PKey`
    """
    pkey = PKey.__new__(PKey)
    pkey._pkey = _lib.X509_REQ_get_pubkey(self._req)
    _openssl_assert(pkey._pkey != _ffi.NULL)
    pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
    pkey._only_public = True
    return pkey
python
{ "resource": "" }
q269807
X509Req.get_subject
test
def get_subject(self):
    """
    Return the subject of this certificate signing request.

    This creates a new :class:`X509Name` that wraps the underlying subject
    name field on the certificate signing request. Modifying it will modify
    the underlying signing request, and will have the effect of modifying
    any other :class:`X509Name` that refers to this subject.

    :return: The subject of this certificate signing request.
    :rtype: :class:`X509Name`
    """
    name = X509Name.__new__(X509Name)
    name._name = _lib.X509_REQ_get_subject_name(self._req)
    _openssl_assert(name._name != _ffi.NULL)

    # The name is owned by the X509Req structure. As long as the X509Name
    # Python object is alive, keep the X509Req Python object alive.
    name._owner = self

    return name
python
{ "resource": "" }
q269808
X509Req.add_extensions
test
def add_extensions(self, extensions):
    """
    Add extensions to the certificate signing request.

    :param extensions: The X.509 extensions to add.
    :type extensions: iterable of :py:class:`X509Extension`
    :return: ``None``
    """
    stack = _lib.sk_X509_EXTENSION_new_null()
    _openssl_assert(stack != _ffi.NULL)

    stack = _ffi.gc(stack, _lib.sk_X509_EXTENSION_free)

    for ext in extensions:
        if not isinstance(ext, X509Extension):
            raise ValueError("One of the elements is not an X509Extension")

        # TODO push can fail (here and elsewhere)
        _lib.sk_X509_EXTENSION_push(stack, ext._extension)

    add_result = _lib.X509_REQ_add_extensions(self._req, stack)
    _openssl_assert(add_result == 1)
python
{ "resource": "" }
q269809
X509Req.get_extensions
test
def get_extensions(self):
    """
    Get X.509 extensions in the certificate signing request.

    :return: The X.509 extensions in this request.
    :rtype: :py:class:`list` of :py:class:`X509Extension` objects.

    .. versionadded:: 0.15
    """
    exts = []
    native_exts_obj = _lib.X509_REQ_get_extensions(self._req)
    for i in range(_lib.sk_X509_EXTENSION_num(native_exts_obj)):
        ext = X509Extension.__new__(X509Extension)
        ext._extension = _lib.sk_X509_EXTENSION_value(native_exts_obj, i)
        exts.append(ext)
    return exts
python
{ "resource": "" }
q269810
X509Req.verify
test
def verify(self, pkey):
    """
    Verifies the signature on this certificate signing request.

    :param PKey pkey: A public key.

    :return: ``True`` if the signature is correct.
    :rtype: bool

    :raises OpenSSL.crypto.Error: If the signature is invalid or there is
        a problem verifying the signature.
    """
    if not isinstance(pkey, PKey):
        raise TypeError("pkey must be a PKey instance")

    result = _lib.X509_REQ_verify(self._req, pkey._pkey)
    if result <= 0:
        _raise_current_error()

    return result
python
{ "resource": "" }
q269811
X509.to_cryptography
test
def to_cryptography(self):
    """
    Export as a ``cryptography`` certificate.

    :rtype: ``cryptography.x509.Certificate``

    .. versionadded:: 17.1.0
    """
    from cryptography.hazmat.backends.openssl.x509 import _Certificate
    backend = _get_backend()
    return _Certificate(backend, self._x509)
python
{ "resource": "" }
q269812
X509.set_version
test
def set_version(self, version):
    """
    Set the version number of the certificate. Note that the version value
    is zero-based, e.g. a value of 0 is V1.

    :param version: The version number of the certificate.
    :type version: :py:class:`int`

    :return: ``None``
    """
    if not isinstance(version, int):
        raise TypeError("version must be an integer")

    _lib.X509_set_version(self._x509, version)
python
{ "resource": "" }
q269813
X509.get_pubkey
test
def get_pubkey(self):
    """
    Get the public key of the certificate.

    :return: The public key.
    :rtype: :py:class:`PKey`
    """
    pkey = PKey.__new__(PKey)
    pkey._pkey = _lib.X509_get_pubkey(self._x509)
    if pkey._pkey == _ffi.NULL:
        _raise_current_error()
    pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
    pkey._only_public = True
    return pkey
python
{ "resource": "" }
q269814
X509.set_pubkey
test
def set_pubkey(self, pkey):
    """
    Set the public key of the certificate.

    :param pkey: The public key.
    :type pkey: :py:class:`PKey`

    :return: :py:data:`None`
    """
    if not isinstance(pkey, PKey):
        raise TypeError("pkey must be a PKey instance")

    set_result = _lib.X509_set_pubkey(self._x509, pkey._pkey)
    _openssl_assert(set_result == 1)
python
{ "resource": "" }
q269815
X509.sign
test
def sign(self, pkey, digest):
    """
    Sign the certificate with this key and digest type.

    :param pkey: The key to sign with.
    :type pkey: :py:class:`PKey`

    :param digest: The name of the message digest to use.
    :type digest: :py:class:`bytes`

    :return: :py:data:`None`
    """
    if not isinstance(pkey, PKey):
        raise TypeError("pkey must be a PKey instance")

    if pkey._only_public:
        raise ValueError("Key only has public part")

    if not pkey._initialized:
        raise ValueError("Key is uninitialized")

    evp_md = _lib.EVP_get_digestbyname(_byte_string(digest))
    if evp_md == _ffi.NULL:
        raise ValueError("No such digest method")

    sign_result = _lib.X509_sign(self._x509, pkey._pkey, evp_md)
    _openssl_assert(sign_result > 0)
python
{ "resource": "" }
q269816
X509.get_signature_algorithm
test
def get_signature_algorithm(self):
    """
    Return the signature algorithm used in the certificate.

    :return: The name of the algorithm.
    :rtype: :py:class:`bytes`

    :raises ValueError: If the signature algorithm is undefined.

    .. versionadded:: 0.13
    """
    algor = _lib.X509_get0_tbs_sigalg(self._x509)
    nid = _lib.OBJ_obj2nid(algor.algorithm)
    if nid == _lib.NID_undef:
        raise ValueError("Undefined signature algorithm")
    return _ffi.string(_lib.OBJ_nid2ln(nid))
python
{ "resource": "" }
q269817
X509.digest
test
def digest(self, digest_name):
    """
    Return the digest of the X509 object.

    :param digest_name: The name of the digest algorithm to use.
    :type digest_name: :py:class:`bytes`

    :return: The digest of the object, formatted as
        :py:const:`b":"`-delimited hex pairs.
    :rtype: :py:class:`bytes`
    """
    digest = _lib.EVP_get_digestbyname(_byte_string(digest_name))
    if digest == _ffi.NULL:
        raise ValueError("No such digest method")

    result_buffer = _ffi.new("unsigned char[]", _lib.EVP_MAX_MD_SIZE)
    result_length = _ffi.new("unsigned int[]", 1)
    result_length[0] = len(result_buffer)

    digest_result = _lib.X509_digest(
        self._x509, digest, result_buffer, result_length)
    _openssl_assert(digest_result == 1)

    return b":".join([
        b16encode(ch).upper()
        for ch in _ffi.buffer(result_buffer, result_length[0])])
python
{ "resource": "" }
q269818
X509.set_serial_number
test
def set_serial_number(self, serial):
    """
    Set the serial number of the certificate.

    :param serial: The new serial number.
    :type serial: :py:class:`int`

    :return: :py:data:`None`
    """
    if not isinstance(serial, _integer_types):
        raise TypeError("serial must be an integer")

    hex_serial = hex(serial)[2:]
    if not isinstance(hex_serial, bytes):
        hex_serial = hex_serial.encode('ascii')

    bignum_serial = _ffi.new("BIGNUM**")

    # BN_hex2bn stores the result in &bignum. Unless it doesn't feel like
    # it. If bignum is still NULL after this call, then the return value
    # is actually the result. I hope. -exarkun
    small_serial = _lib.BN_hex2bn(bignum_serial, hex_serial)

    if bignum_serial[0] == _ffi.NULL:
        set_result = _lib.ASN1_INTEGER_set(
            _lib.X509_get_serialNumber(self._x509), small_serial)
        if set_result:
            # TODO Not tested
            _raise_current_error()
    else:
        asn1_serial = _lib.BN_to_ASN1_INTEGER(bignum_serial[0], _ffi.NULL)
        _lib.BN_free(bignum_serial[0])
        if asn1_serial == _ffi.NULL:
            # TODO Not tested
            _raise_current_error()
        asn1_serial = _ffi.gc(asn1_serial, _lib.ASN1_INTEGER_free)
        set_result = _lib.X509_set_serialNumber(self._x509, asn1_serial)
        _openssl_assert(set_result == 1)
python
{ "resource": "" }
q269819
X509.get_serial_number
test
def get_serial_number(self):
    """
    Return the serial number of this certificate.

    :return: The serial number.
    :rtype: int
    """
    asn1_serial = _lib.X509_get_serialNumber(self._x509)
    bignum_serial = _lib.ASN1_INTEGER_to_BN(asn1_serial, _ffi.NULL)
    try:
        hex_serial = _lib.BN_bn2hex(bignum_serial)
        try:
            hexstring_serial = _ffi.string(hex_serial)
            serial = int(hexstring_serial, 16)
            return serial
        finally:
            _lib.OPENSSL_free(hex_serial)
    finally:
        _lib.BN_free(bignum_serial)
python
{ "resource": "" }
q269820
X509.gmtime_adj_notAfter
test
def gmtime_adj_notAfter(self, amount):
    """
    Adjust the time stamp on which the certificate stops being valid.

    :param int amount: The number of seconds by which to adjust the
        timestamp.
    :return: ``None``
    """
    if not isinstance(amount, int):
        raise TypeError("amount must be an integer")

    notAfter = _lib.X509_get_notAfter(self._x509)
    _lib.X509_gmtime_adj(notAfter, amount)
python
{ "resource": "" }
q269821
X509.gmtime_adj_notBefore
test
def gmtime_adj_notBefore(self, amount):
    """
    Adjust the timestamp on which the certificate starts being valid.

    :param amount: The number of seconds by which to adjust the timestamp.
    :return: ``None``
    """
    if not isinstance(amount, int):
        raise TypeError("amount must be an integer")

    notBefore = _lib.X509_get_notBefore(self._x509)
    _lib.X509_gmtime_adj(notBefore, amount)
python
{ "resource": "" }
q269822
X509.has_expired
test
def has_expired(self):
    """
    Check whether the certificate has expired.

    :return: ``True`` if the certificate has expired, ``False`` otherwise.
    :rtype: bool
    """
    time_string = _native(self.get_notAfter())
    not_after = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")

    return not_after < datetime.datetime.utcnow()
python
{ "resource": "" }
q269823
X509.get_issuer
test
def get_issuer(self):
    """
    Return the issuer of this certificate.

    This creates a new :class:`X509Name` that wraps the underlying issuer
    name field on the certificate. Modifying it will modify the underlying
    certificate, and will have the effect of modifying any other
    :class:`X509Name` that refers to this issuer.

    :return: The issuer of this certificate.
    :rtype: :class:`X509Name`
    """
    name = self._get_name(_lib.X509_get_issuer_name)
    self._issuer_invalidator.add(name)
    return name
python
{ "resource": "" }
q269824
X509.set_issuer
test
def set_issuer(self, issuer):
    """
    Set the issuer of this certificate.

    :param issuer: The issuer.
    :type issuer: :py:class:`X509Name`

    :return: ``None``
    """
    self._set_name(_lib.X509_set_issuer_name, issuer)
    self._issuer_invalidator.clear()
python
{ "resource": "" }
q269825
X509.get_subject
test
def get_subject(self):
    """
    Return the subject of this certificate.

    This creates a new :class:`X509Name` that wraps the underlying subject
    name field on the certificate. Modifying it will modify the underlying
    certificate, and will have the effect of modifying any other
    :class:`X509Name` that refers to this subject.

    :return: The subject of this certificate.
    :rtype: :class:`X509Name`
    """
    name = self._get_name(_lib.X509_get_subject_name)
    self._subject_invalidator.add(name)
    return name
python
{ "resource": "" }
q269826
X509.set_subject
test
def set_subject(self, subject):
    """
    Set the subject of this certificate.

    :param subject: The subject.
    :type subject: :py:class:`X509Name`

    :return: ``None``
    """
    self._set_name(_lib.X509_set_subject_name, subject)
    self._subject_invalidator.clear()
python
{ "resource": "" }
q269827
X509.add_extensions
test
def add_extensions(self, extensions):
    """
    Add extensions to the certificate.

    :param extensions: The extensions to add.
    :type extensions: An iterable of :py:class:`X509Extension` objects.
    :return: ``None``
    """
    for ext in extensions:
        if not isinstance(ext, X509Extension):
            raise ValueError("One of the elements is not an X509Extension")

        add_result = _lib.X509_add_ext(self._x509, ext._extension, -1)
        if not add_result:
            _raise_current_error()
python
{ "resource": "" }
q269828
X509.get_extension
test
def get_extension(self, index):
    """
    Get a specific extension of the certificate by index.

    Extensions on a certificate are kept in order. The index parameter
    selects which extension will be returned.

    :param int index: The index of the extension to retrieve.
    :return: The extension at the specified index.
    :rtype: :py:class:`X509Extension`
    :raises IndexError: If the extension index was out of bounds.

    .. versionadded:: 0.12
    """
    ext = X509Extension.__new__(X509Extension)
    ext._extension = _lib.X509_get_ext(self._x509, index)
    if ext._extension == _ffi.NULL:
        raise IndexError("extension index out of bounds")

    extension = _lib.X509_EXTENSION_dup(ext._extension)
    ext._extension = _ffi.gc(extension, _lib.X509_EXTENSION_free)
    return ext
python
{ "resource": "" }
q269829
X509Store.add_cert
test
def add_cert(self, cert):
    """
    Adds a trusted certificate to this store.

    Adding a certificate with this method adds this certificate as a
    *trusted* certificate.

    :param X509 cert: The certificate to add to this store.

    :raises TypeError: If the certificate is not an :class:`X509`.

    :raises OpenSSL.crypto.Error: If OpenSSL was unhappy with your
        certificate.

    :return: ``None`` if the certificate was added successfully.
    """
    if not isinstance(cert, X509):
        raise TypeError()

    # As of OpenSSL 1.1.0i adding the same cert to the store more than
    # once doesn't cause an error. Accordingly, this code now silences
    # the error for OpenSSL < 1.1.0i as well.
    if _lib.X509_STORE_add_cert(self._store, cert._x509) == 0:
        code = _lib.ERR_peek_error()
        err_reason = _lib.ERR_GET_REASON(code)
        _openssl_assert(
            err_reason == _lib.X509_R_CERT_ALREADY_IN_HASH_TABLE
        )
        _lib.ERR_clear_error()
python
{ "resource": "" }
q269830
X509Store.add_crl
test
def add_crl(self, crl):
    """
    Add a certificate revocation list to this store.

    The certificate revocation lists added to a store will only be used if
    the associated flags are configured to check certificate revocation
    lists.

    .. versionadded:: 16.1.0

    :param CRL crl: The certificate revocation list to add to this store.
    :return: ``None`` if the certificate revocation list was added
        successfully.
    """
    _openssl_assert(_lib.X509_STORE_add_crl(self._store, crl._crl) != 0)
python
{ "resource": "" }
q269831
X509Store.set_time
test
def set_time(self, vfy_time):
    """
    Set the time against which the certificates are verified.

    Normally the current time is used.

    .. note::

        For example, you can determine if a certificate was valid at a
        given time.

    .. versionadded:: 17.0.0

    :param datetime vfy_time: The verification time to set on this store.
    :return: ``None`` if the verification time was successfully set.
    """
    param = _lib.X509_VERIFY_PARAM_new()
    param = _ffi.gc(param, _lib.X509_VERIFY_PARAM_free)

    _lib.X509_VERIFY_PARAM_set_time(param, int(vfy_time.strftime('%s')))
    _openssl_assert(_lib.X509_STORE_set1_param(self._store, param) != 0)
python
{ "resource": "" }
q269832
X509StoreContext._init
test
def _init(self):
    """
    Set up the store context for a subsequent verification operation.

    Calling this method more than once without first calling
    :meth:`_cleanup` will leak memory.
    """
    ret = _lib.X509_STORE_CTX_init(
        self._store_ctx, self._store._store, self._cert._x509, _ffi.NULL
    )
    if ret <= 0:
        _raise_current_error()
python
{ "resource": "" }
q269833
X509StoreContext._exception_from_context
test
def _exception_from_context(self):
    """
    Convert an OpenSSL native context error failure into a Python
    exception.

    When a call to native OpenSSL X509_verify_cert fails, additional
    information about the failure can be obtained from the store context.
    """
    errors = [
        _lib.X509_STORE_CTX_get_error(self._store_ctx),
        _lib.X509_STORE_CTX_get_error_depth(self._store_ctx),
        _native(_ffi.string(_lib.X509_verify_cert_error_string(
            _lib.X509_STORE_CTX_get_error(self._store_ctx)))),
    ]
    # A context error should always be associated with a certificate, so
    # we expect this call to never return :class:`None`.
    _x509 = _lib.X509_STORE_CTX_get_current_cert(self._store_ctx)
    _cert = _lib.X509_dup(_x509)
    pycert = X509._from_raw_x509_ptr(_cert)
    return X509StoreContextError(errors, pycert)
python
{ "resource": "" }
q269834
X509StoreContext.verify_certificate
test
def verify_certificate(self):
    """
    Verify a certificate in a context.

    .. versionadded:: 0.15

    :raises X509StoreContextError: If an error occurred when validating a
        certificate in the context. Sets ``certificate`` attribute to
        indicate which certificate caused the error.
    """
    # Always re-initialize the store context in case
    # :meth:`verify_certificate` is called multiple times.
    #
    # :meth:`_init` is called in :meth:`__init__` so _cleanup is called
    # before _init to ensure memory is not leaked.
    self._cleanup()
    self._init()
    ret = _lib.X509_verify_cert(self._store_ctx)
    self._cleanup()
    if ret <= 0:
        raise self._exception_from_context()
python
{ "resource": "" }
q269835
Revoked.set_serial
test
def set_serial(self, hex_str):
    """
    Set the serial number.

    The serial number is formatted as a hexadecimal number encoded in
    ASCII.

    :param bytes hex_str: The new serial number.

    :return: ``None``
    """
    bignum_serial = _ffi.gc(_lib.BN_new(), _lib.BN_free)
    bignum_ptr = _ffi.new("BIGNUM**")
    bignum_ptr[0] = bignum_serial
    bn_result = _lib.BN_hex2bn(bignum_ptr, hex_str)
    if not bn_result:
        raise ValueError("bad hex string")

    asn1_serial = _ffi.gc(
        _lib.BN_to_ASN1_INTEGER(bignum_serial, _ffi.NULL),
        _lib.ASN1_INTEGER_free)
    _lib.X509_REVOKED_set_serialNumber(self._revoked, asn1_serial)
python
{ "resource": "" }
q269836
Revoked.get_serial
test
def get_serial(self):
    """
    Get the serial number.

    The serial number is formatted as a hexadecimal number encoded in
    ASCII.

    :return: The serial number.
    :rtype: bytes
    """
    bio = _new_mem_buf()

    asn1_int = _lib.X509_REVOKED_get0_serialNumber(self._revoked)
    _openssl_assert(asn1_int != _ffi.NULL)
    result = _lib.i2a_ASN1_INTEGER(bio, asn1_int)
    _openssl_assert(result >= 0)
    return _bio_to_string(bio)
python
{ "resource": "" }
q269837
Revoked.set_reason
test
def set_reason(self, reason):
    """
    Set the reason of this revocation.

    If :data:`reason` is ``None``, delete the reason instead.

    :param reason: The reason string.
    :type reason: :class:`bytes` or :class:`NoneType`

    :return: ``None``

    .. seealso::

        :meth:`all_reasons`, which gives you a list of all supported
        reasons which you might pass to this method.
    """
    if reason is None:
        self._delete_reason()
    elif not isinstance(reason, bytes):
        raise TypeError("reason must be None or a byte string")
    else:
        reason = reason.lower().replace(b' ', b'')
        reason_code = [r.lower() for r in self._crl_reasons].index(reason)

        new_reason_ext = _lib.ASN1_ENUMERATED_new()
        _openssl_assert(new_reason_ext != _ffi.NULL)
        new_reason_ext = _ffi.gc(new_reason_ext, _lib.ASN1_ENUMERATED_free)

        set_result = _lib.ASN1_ENUMERATED_set(new_reason_ext, reason_code)
        _openssl_assert(set_result != _ffi.NULL)

        self._delete_reason()
        add_result = _lib.X509_REVOKED_add1_ext_i2d(
            self._revoked, _lib.NID_crl_reason, new_reason_ext, 0, 0)
        _openssl_assert(add_result == 1)
python
{ "resource": "" }
q269838
Revoked.get_reason
test
def get_reason(self):
    """
    Get the reason of this revocation.

    :return: The reason, or ``None`` if there is none.
    :rtype: bytes or NoneType

    .. seealso::

        :meth:`all_reasons`, which gives you a list of all supported
        reasons this method might return.
    """
    for i in range(_lib.X509_REVOKED_get_ext_count(self._revoked)):
        ext = _lib.X509_REVOKED_get_ext(self._revoked, i)
        obj = _lib.X509_EXTENSION_get_object(ext)
        if _lib.OBJ_obj2nid(obj) == _lib.NID_crl_reason:
            bio = _new_mem_buf()

            print_result = _lib.X509V3_EXT_print(bio, ext, 0, 0)
            if not print_result:
                print_result = _lib.M_ASN1_OCTET_STRING_print(
                    bio, _lib.X509_EXTENSION_get_data(ext)
                )
                _openssl_assert(print_result != 0)

            return _bio_to_string(bio)
python
{ "resource": "" }
q269839
Revoked.set_rev_date
test
def set_rev_date(self, when):
    """
    Set the revocation timestamp.

    :param bytes when: The timestamp of the revocation, as ASN.1 TIME.
    :return: ``None``
    """
    dt = _lib.X509_REVOKED_get0_revocationDate(self._revoked)
    return _set_asn1_time(dt, when)
python
{ "resource": "" }
q269840
CRL.to_cryptography
test
def to_cryptography(self):
    """
    Export as a ``cryptography`` CRL.

    :rtype: ``cryptography.x509.CertificateRevocationList``

    .. versionadded:: 17.1.0
    """
    from cryptography.hazmat.backends.openssl.x509 import (
        _CertificateRevocationList
    )
    backend = _get_backend()
    return _CertificateRevocationList(backend, self._crl)
python
{ "resource": "" }
q269841
CRL.get_revoked
test
def get_revoked(self):
    """
    Return the revocations in this certificate revocation list.

    These revocations will be provided by value, not by reference. That
    means it's okay to mutate them: it won't affect this CRL.

    :return: The revocations in this CRL.
    :rtype: :class:`tuple` of :class:`Revocation`
    """
    results = []
    revoked_stack = _lib.X509_CRL_get_REVOKED(self._crl)
    for i in range(_lib.sk_X509_REVOKED_num(revoked_stack)):
        revoked = _lib.sk_X509_REVOKED_value(revoked_stack, i)
        revoked_copy = _lib.Cryptography_X509_REVOKED_dup(revoked)
        pyrev = Revoked.__new__(Revoked)
        pyrev._revoked = _ffi.gc(revoked_copy, _lib.X509_REVOKED_free)
        results.append(pyrev)
    if results:
        return tuple(results)
python
{ "resource": "" }
q269842
CRL.get_issuer
test
def get_issuer(self):
    """
    Get the CRL's issuer.

    .. versionadded:: 16.1.0

    :rtype: X509Name
    """
    _issuer = _lib.X509_NAME_dup(_lib.X509_CRL_get_issuer(self._crl))
    _openssl_assert(_issuer != _ffi.NULL)
    _issuer = _ffi.gc(_issuer, _lib.X509_NAME_free)
    issuer = X509Name.__new__(X509Name)
    issuer._name = _issuer
    return issuer
python
{ "resource": "" }
q269843
CRL.sign
test
def sign(self, issuer_cert, issuer_key, digest):
    """
    Sign the CRL.

    Signing a CRL enables clients to associate the CRL itself with an
    issuer. Before a CRL is meaningful to other OpenSSL functions, it must
    be signed by an issuer.

    This method implicitly sets the issuer's name based on the issuer
    certificate and private key used to sign the CRL.

    .. versionadded:: 16.1.0

    :param X509 issuer_cert: The issuer's certificate.
    :param PKey issuer_key: The issuer's private key.
    :param bytes digest: The digest method to sign the CRL with.
    """
    digest_obj = _lib.EVP_get_digestbyname(digest)
    _openssl_assert(digest_obj != _ffi.NULL)
    _lib.X509_CRL_set_issuer_name(
        self._crl, _lib.X509_get_subject_name(issuer_cert._x509))
    _lib.X509_CRL_sort(self._crl)
    result = _lib.X509_CRL_sign(self._crl, issuer_key._pkey, digest_obj)
    _openssl_assert(result != 0)
python
{ "resource": "" }
q269844
CRL.export
test
def export(self, cert, key, type=FILETYPE_PEM, days=100,
           digest=_UNSPECIFIED):
    """
    Export the CRL as a string.

    :param X509 cert: The certificate used to sign the CRL.
    :param PKey key: The key used to sign the CRL.
    :param int type: The export format, either :data:`FILETYPE_PEM`,
        :data:`FILETYPE_ASN1`, or :data:`FILETYPE_TEXT`.
    :param int days: The number of days until the next update of this CRL.
    :param bytes digest: The name of the message digest to use (eg
        ``b"sha256"``).
    :rtype: bytes
    """
    if not isinstance(cert, X509):
        raise TypeError("cert must be an X509 instance")
    if not isinstance(key, PKey):
        raise TypeError("key must be a PKey instance")
    if not isinstance(type, int):
        raise TypeError("type must be an integer")

    if digest is _UNSPECIFIED:
        raise TypeError("digest must be provided")

    digest_obj = _lib.EVP_get_digestbyname(digest)
    if digest_obj == _ffi.NULL:
        raise ValueError("No such digest method")

    bio = _lib.BIO_new(_lib.BIO_s_mem())
    _openssl_assert(bio != _ffi.NULL)

    # A scratch time object to give different values to different CRL
    # fields
    sometime = _lib.ASN1_TIME_new()
    _openssl_assert(sometime != _ffi.NULL)

    _lib.X509_gmtime_adj(sometime, 0)
    _lib.X509_CRL_set_lastUpdate(self._crl, sometime)

    _lib.X509_gmtime_adj(sometime, days * 24 * 60 * 60)
    _lib.X509_CRL_set_nextUpdate(self._crl, sometime)

    _lib.X509_CRL_set_issuer_name(
        self._crl, _lib.X509_get_subject_name(cert._x509)
    )

    sign_result = _lib.X509_CRL_sign(self._crl, key._pkey, digest_obj)
    if not sign_result:
        _raise_current_error()

    return dump_crl(type, self)
python
{ "resource": "" }
q269845
PKCS7.get_type_name
test
def get_type_name(self):
    """
    Returns the type name of the PKCS7 structure.

    :return: A string with the typename.
    """
    nid = _lib.OBJ_obj2nid(self._pkcs7.type)
    string_type = _lib.OBJ_nid2sn(nid)
    return _ffi.string(string_type)
python
{ "resource": "" }
q269846
PKCS12.set_ca_certificates
test
def set_ca_certificates(self, cacerts):
    """
    Replace or set the CA certificates within the PKCS12 object.

    :param cacerts: The new CA certificates, or :py:const:`None` to unset
        them.
    :type cacerts: An iterable of :py:class:`X509` or :py:const:`None`

    :return: ``None``
    """
    if cacerts is None:
        self._cacerts = None
    else:
        cacerts = list(cacerts)
        for cert in cacerts:
            if not isinstance(cert, X509):
                raise TypeError(
                    "iterable must only contain X509 instances"
                )
        self._cacerts = cacerts
python
{ "resource": "" }
q269847
PKCS12.export
test
def export(self, passphrase=None, iter=2048, maciter=1):
    """
    Dump a PKCS12 object as a string.

    For more information, see the :c:func:`PKCS12_create` man page.

    :param passphrase: The passphrase used to encrypt the structure.
        Unlike some other passphrase arguments, this *must* be a string,
        not a callback.
    :type passphrase: :py:data:`bytes`

    :param iter: Number of times to repeat the encryption step.
    :type iter: :py:data:`int`

    :param maciter: Number of times to repeat the MAC step.
    :type maciter: :py:data:`int`

    :return: The string representation of the PKCS #12 structure.
    :rtype: bytes
    """
    passphrase = _text_to_bytes_and_warn("passphrase", passphrase)

    if self._cacerts is None:
        cacerts = _ffi.NULL
    else:
        cacerts = _lib.sk_X509_new_null()
        cacerts = _ffi.gc(cacerts, _lib.sk_X509_free)
        for cert in self._cacerts:
            _lib.sk_X509_push(cacerts, cert._x509)

    if passphrase is None:
        passphrase = _ffi.NULL

    friendlyname = self._friendlyname
    if friendlyname is None:
        friendlyname = _ffi.NULL

    if self._pkey is None:
        pkey = _ffi.NULL
    else:
        pkey = self._pkey._pkey

    if self._cert is None:
        cert = _ffi.NULL
    else:
        cert = self._cert._x509

    pkcs12 = _lib.PKCS12_create(
        passphrase, friendlyname, pkey, cert, cacerts,
        _lib.NID_pbe_WithSHA1And3_Key_TripleDES_CBC,
        _lib.NID_pbe_WithSHA1And3_Key_TripleDES_CBC,
        iter, maciter, 0)
    if pkcs12 == _ffi.NULL:
        _raise_current_error()
    pkcs12 = _ffi.gc(pkcs12, _lib.PKCS12_free)

    bio = _new_mem_buf()
    _lib.i2d_PKCS12_bio(bio, pkcs12)
    return _bio_to_string(bio)
python
{ "resource": "" }
q269848
NetscapeSPKI.sign
test
def sign(self, pkey, digest):
    """
    Sign the certificate request with this key and digest type.

    :param pkey: The private key to sign with.
    :type pkey: :py:class:`PKey`

    :param digest: The message digest to use.
    :type digest: :py:class:`bytes`

    :return: ``None``
    """
    if pkey._only_public:
        raise ValueError("Key has only public part")

    if not pkey._initialized:
        raise ValueError("Key is uninitialized")

    digest_obj = _lib.EVP_get_digestbyname(_byte_string(digest))
    if digest_obj == _ffi.NULL:
        raise ValueError("No such digest method")

    sign_result = _lib.NETSCAPE_SPKI_sign(
        self._spki, pkey._pkey, digest_obj
    )
    _openssl_assert(sign_result > 0)
python
{ "resource": "" }
q269849
NetscapeSPKI.verify
test
def verify(self, key):
    """
    Verifies a signature on a certificate request.

    :param PKey key: The public key that the signature is supposedly from.

    :return: ``True`` if the signature is correct.
    :rtype: bool

    :raises OpenSSL.crypto.Error: If the signature is invalid, or there
        was a problem verifying the signature.
    """
    answer = _lib.NETSCAPE_SPKI_verify(self._spki, key._pkey)
    if answer <= 0:
        _raise_current_error()
    return True
python
{ "resource": "" }
q269850
NetscapeSPKI.b64_encode
test
def b64_encode(self):
    """
    Generate a base64 encoded representation of this SPKI object.

    :return: The base64 encoded string.
    :rtype: :py:class:`bytes`
    """
    encoded = _lib.NETSCAPE_SPKI_b64_encode(self._spki)
    result = _ffi.string(encoded)
    _lib.OPENSSL_free(encoded)
    return result
python
{ "resource": "" }
q269851
NetscapeSPKI.get_pubkey
test
def get_pubkey(self):
    """
    Get the public key of this certificate.

    :return: The public key.
    :rtype: :py:class:`PKey`
    """
    pkey = PKey.__new__(PKey)
    pkey._pkey = _lib.NETSCAPE_SPKI_get_pubkey(self._spki)
    _openssl_assert(pkey._pkey != _ffi.NULL)
    pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
    pkey._only_public = True
    return pkey
python
{ "resource": "" }
q269852
NetscapeSPKI.set_pubkey
test
def set_pubkey(self, pkey):
    """
    Set the public key of the certificate.

    :param pkey: The public key.
    :return: ``None``
    """
    set_result = _lib.NETSCAPE_SPKI_set_pubkey(self._spki, pkey._pkey)
    _openssl_assert(set_result == 1)
python
{ "resource": "" }
q269853
exception_from_error_queue
test
def exception_from_error_queue(exception_type):
    """
    Convert an OpenSSL library failure into a Python exception.

    When a call to the native OpenSSL library fails, this is usually
    signalled by the return value, and an error code is stored in an error
    queue associated with the current thread. The err library provides
    functions to obtain these error codes and textual error messages.
    """
    errors = []

    while True:
        error = lib.ERR_get_error()
        if error == 0:
            break
        errors.append((
            text(lib.ERR_lib_error_string(error)),
            text(lib.ERR_func_error_string(error)),
            text(lib.ERR_reason_error_string(error))))

    raise exception_type(errors)
python
{ "resource": "" }
q269854
text_to_bytes_and_warn
test
def text_to_bytes_and_warn(label, obj):
    """
    If ``obj`` is text, emit a warning that it should be bytes instead and
    try to convert it to bytes automatically.

    :param str label: The name of the parameter from which ``obj`` was
        taken (so a developer can easily find the source of the problem
        and correct it).

    :return: If ``obj`` is the text string type, a ``bytes`` object giving
        the UTF-8 encoding of that text is returned. Otherwise, ``obj``
        itself is returned.
    """
    if isinstance(obj, text_type):
        warnings.warn(
            _TEXT_WARNING.format(label),
            category=DeprecationWarning,
            stacklevel=3
        )
        return obj.encode('utf-8')
    return obj
python
{ "resource": "" }
q269855
_print_token_factory
test
def _print_token_factory(col):
    """Internal helper to provide color names."""
    def _helper(msg):
        style = style_from_dict({
            Token.Color: col,
        })
        tokens = [
            (Token.Color, msg)
        ]
        print_tokens(tokens, style=style)

    def _helper_no_terminal(msg):
        # workaround if we have no terminal
        print(msg)

    if sys.stdout.isatty():
        return _helper
    else:
        return _helper_no_terminal
python
{ "resource": "" }
q269856
TrelloService.get_service_metadata
test
def get_service_metadata(self):
    """
    Return extra config options to be passed to the TrelloIssue class.
    """
    return {
        'import_labels_as_tags': self.config.get(
            'import_labels_as_tags', False, asbool),
        'label_template': self.config.get(
            'label_template', DEFAULT_LABEL_TEMPLATE),
    }
python
{ "resource": "" }
q269857
TrelloService.issues
test
def issues(self):
    """
    Returns a list of dicts representing issues from a remote service.
    """
    for board in self.get_boards():
        for lst in self.get_lists(board['id']):
            listextra = dict(boardname=board['name'], listname=lst['name'])
            for card in self.get_cards(lst['id']):
                issue = self.get_issue_for_record(card, extra=listextra)
                issue.update_extra({"annotations": self.annotations(card)})
                yield issue
python
{ "resource": "" }
q269858
TrelloService.annotations
test
def annotations(self, card_json):
    """
    A wrapper around get_comments that builds the taskwarrior annotations.
    """
    comments = self.get_comments(card_json['id'])
    annotations = self.build_annotations(
        ((c['memberCreator']['username'], c['data']['text'])
         for c in comments),
        card_json["shortUrl"])
    return annotations
python
{ "resource": "" }
q269859
TrelloService.get_boards
test
def get_boards(self):
    """
    Get the list of boards to pull cards from.

    If the user gave a value to trello.include_boards use that, otherwise
    ask the Trello API for the user's boards.
    """
    if 'include_boards' in self.config:
        for boardid in self.config.get('include_boards', to_type=aslist):
            # Get the board name
            yield self.api_request(
                "/1/boards/{id}".format(id=boardid), fields='name')
    else:
        boards = self.api_request("/1/members/me/boards", fields='name')
        for board in boards:
            yield board
python
{ "resource": "" }
q269860
TrelloService.get_lists
test
def get_lists(self, board):
    """
    Returns a list of the filtered lists for the given board.

    This filters the trello lists according to the configuration values of
    trello.include_lists and trello.exclude_lists.
    """
    lists = self.api_request(
        "/1/boards/{board_id}/lists/open".format(board_id=board),
        fields='name')

    include_lists = self.config.get('include_lists', to_type=aslist)
    if include_lists:
        lists = [l for l in lists if l['name'] in include_lists]

    exclude_lists = self.config.get('exclude_lists', to_type=aslist)
    if exclude_lists:
        lists = [l for l in lists if l['name'] not in exclude_lists]

    return lists
python
{ "resource": "" }
q269861
TrelloService.get_cards
test
def get_cards(self, list_id):
    """
    Returns an iterator for the cards in a given list, filtered according
    to configuration values of trello.only_if_assigned and
    trello.also_unassigned.
    """
    params = {'fields': 'name,idShort,shortLink,shortUrl,url,labels,due'}
    member = self.config.get('only_if_assigned', None)
    unassigned = self.config.get('also_unassigned', False, asbool)
    if member is not None:
        params['members'] = 'true'
        params['member_fields'] = 'username'
    cards = self.api_request(
        "/1/lists/{list_id}/cards/open".format(list_id=list_id),
        **params)
    for card in cards:
        if (member is None
                or member in [m['username'] for m in card['members']]
                or (unassigned and not card['members'])):
            yield card
python
{ "resource": "" }
q269862
TrelloService.get_comments
test
def get_comments(self, card_id):
    """
    Returns an iterator for the comments on a certain card.
    """
    params = {'filter': 'commentCard', 'memberCreator_fields': 'username'}
    comments = self.api_request(
        "/1/cards/{card_id}/actions".format(card_id=card_id), **params)
    for comment in comments:
        assert comment['type'] == 'commentCard'
        yield comment
python
{ "resource": "" }
q269863
GithubClient._api_url
test
def _api_url(self, path, **context):
    """
    Build the full url to the API endpoint.
    """
    if self.host == 'github.com':
        baseurl = "https://api.github.com"
    else:
        baseurl = "https://{}/api/v3".format(self.host)
    return baseurl + path.format(**context)
python
{ "resource": "" }
q269864
GithubClient._getter
test
def _getter(self, url, subkey=None):
    """
    Pagination utility. Obnoxious.
    """
    kwargs = {}
    if 'basic' in self.auth:
        kwargs['auth'] = self.auth['basic']

    results = []
    link = dict(next=url)
    while 'next' in link:
        response = self.session.get(link['next'], **kwargs)

        # Warn about the mis-leading 404 error code. See:
        # https://github.com/ralphbean/bugwarrior/issues/374
        if response.status_code == 404 and 'token' in self.auth:
            log.warn("A '404' from github may indicate an auth "
                     "failure. Make sure both that your token is correct "
                     "and that it has 'public_repo' and not 'public "
                     "access' rights.")

        json_res = self.json_response(response)

        if subkey is not None:
            json_res = json_res[subkey]

        results += json_res

        link = self._link_field_to_dict(response.headers.get('link', None))

    return results
python
{ "resource": "" }
q269865
GithubClient._link_field_to_dict
test
def _link_field_to_dict(field):
    """
    Utility for ripping apart github's Link header field.
    It's kind of ugly.
    """
    if not field:
        return dict()

    return dict([
        (
            part.split('; ')[1][5:-1],
            part.split('; ')[0][1:-1],
        )
        for part in field.split(', ')
    ])
python
{ "resource": "" }
q269866
GithubService.get_query
test
def get_query(self, query):
    """
    Grab all issues matching a github query.
    """
    issues = {}
    for issue in self.client.get_query(query):
        url = issue['html_url']
        try:
            repo = self.get_repository_from_issue(issue)
        except ValueError as e:
            log.critical(e)
        else:
            issues[url] = (repo, issue)
    return issues
python
{ "resource": "" }
q269867
GithubService._reqs
test
def _reqs(self, tag):
    """
    Grab all the pull requests.
    """
    return [
        (tag, i) for i in self.client.get_pulls(*tag.split('/'))
    ]
python
{ "resource": "" }
q269868
aggregate_issues
test
def aggregate_issues(conf, main_section, debug):
    """
    Return all issues from every target.
    """
    log.info("Starting to aggregate remote issues.")

    # Create and call service objects for every target in the config
    targets = aslist(conf.get(main_section, 'targets'))

    queue = multiprocessing.Queue()

    log.info("Spawning %i workers." % len(targets))
    processes = []

    if debug:
        for target in targets:
            _aggregate_issues(
                conf, main_section, target, queue,
                conf.get(target, 'service')
            )
    else:
        for target in targets:
            proc = multiprocessing.Process(
                target=_aggregate_issues,
                args=(conf, main_section, target, queue,
                      conf.get(target, 'service'))
            )
            proc.start()
            processes.append(proc)

            # Sleep for 1 second here to try and avoid a race condition
            # where all N workers start up and ask the gpg-agent process
            # for information at the same time. This causes gpg-agent to
            # fumble and tell some of our workers some incomplete things.
            time.sleep(1)

    currently_running = len(targets)
    while currently_running > 0:
        issue = queue.get(True)
        if isinstance(issue, tuple):
            completion_type, args = issue
            if completion_type == SERVICE_FINISHED_ERROR:
                target, e = args
                log.info("Terminating workers")
                for process in processes:
                    process.terminate()
                raise RuntimeError(
                    "critical error in target '{}'".format(target))
            currently_running -= 1
            continue
        yield issue

    log.info("Done aggregating remote issues.")
python
{ "resource": "" }
q269869
IssueService._get_config_or_default
test
def _get_config_or_default(self, key, default, as_type=lambda x: x):
    """Return a main config value, or default if it does not exist."""
    if self.main_config.has_option(self.main_section, key):
        return as_type(self.main_config.get(self.main_section, key))
    return default
python
{ "resource": "" }
q269870
IssueService.get_templates
test
def get_templates(self):
    """
    Get any defined templates for configuration values.

    Users can override the value of any Taskwarrior field using this
    feature on a per-key basis. The key should be the name of the field
    whose value you would like to configure, followed by '_template', and
    the value should be a Jinja template generating the field's value. As
    context variables, all fields on the taskwarrior record are available.

    For example, to prefix the returned project name for tickets returned
    by a service with 'workproject_', you could add an entry reading:

        project_template = workproject_{{project}}

    Or, if you'd simply like to override the returned project name for all
    tickets incoming from a specific service, you could add an entry like:

        project_template = myprojectname

    The above would cause all issues to receive a project name of
    'myprojectname', regardless of what the project name of the generated
    issue was.
    """
    templates = {}
    for key in six.iterkeys(Task.FIELDS):
        template_key = '%s_template' % key
        if template_key in self.config:
            templates[key] = self.config.get(template_key)
    return templates
python
{ "resource": "" }
q269871
IssueService.validate_config
test
def validate_config(cls, service_config, target):
    """
    Validate generic options for a particular target.
    """
    if service_config.has_option(target, 'only_if_assigned'):
        die("[%s] has an 'only_if_assigned' option. Should be "
            "'%s.only_if_assigned'." % (target, cls.CONFIG_PREFIX))
    if service_config.has_option(target, 'also_unassigned'):
        die("[%s] has an 'also_unassigned' option. Should be "
            "'%s.also_unassigned'." % (target, cls.CONFIG_PREFIX))
    if service_config.has_option(target, 'default_priority'):
        die("[%s] has a 'default_priority' option. Should be "
            "'%s.default_priority'." % (target, cls.CONFIG_PREFIX))
    if service_config.has_option(target, 'add_tags'):
        die("[%s] has an 'add_tags' option. Should be "
            "'%s.add_tags'." % (target, cls.CONFIG_PREFIX))
python
{ "resource": "" }
q269872
IssueService.include
test
def include(self, issue):
    """
    Return true if the issue in question should be included.
    """
    only_if_assigned = self.config.get('only_if_assigned', None)
    if only_if_assigned:
        owner = self.get_owner(issue)
        include_owners = [only_if_assigned]

        if self.config.get('also_unassigned', None, asbool):
            include_owners.append(None)

        return owner in include_owners

    only_if_author = self.config.get('only_if_author', None)
    if only_if_author:
        return self.get_author(issue) == only_if_author

    return True
python
{ "resource": "" }
q269873
make_table
test
def make_table(grid):
    """
    Make a RST-compatible table.

    From http://stackoverflow.com/a/12539081
    """
    cell_width = 2 + max(
        reduce(
            lambda x, y: x + y,
            [[len(item) for item in row] for row in grid],
            []
        )
    )
    num_cols = len(grid[0])
    rst = table_div(num_cols, cell_width, 0)
    header_flag = 1
    for row in grid:
        rst = rst + '| ' + '| '.join(
            [normalize_cell(x, cell_width - 1) for x in row]
        ) + '|\n'
        rst = rst + table_div(num_cols, cell_width, header_flag)
        header_flag = 0
    return rst
python
{ "resource": "" }
q269874
oracle_eval
test
def oracle_eval(command):
    """
    Retrieve password from the given command.
    """
    p = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.wait()
    if p.returncode == 0:
        return p.stdout.readline().strip().decode('utf-8')
    else:
        die(
            "Error retrieving password: `{command}` returned '{error}'".format(
                command=command, error=p.stderr.read().strip()))
python
{ "resource": "" }
q269875
BugwarriorConfigParser.getint
test
def getint(self, section, option):
    """
    Accepts both integers and empty values.
    """
    try:
        return super(BugwarriorConfigParser, self).getint(section, option)
    except ValueError:
        if self.get(section, option) == u'':
            return None
        else:
            raise ValueError(
                "{section}.{option} must be an integer or empty.".format(
                    section=section, option=option))
python
{ "resource": "" }
q269876
pull
test
def pull(dry_run, flavor, interactive, debug):
    """
    Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc
    """
    try:
        main_section = _get_section_name(flavor)
        config = _try_load_config(main_section, interactive)

        lockfile_path = os.path.join(get_data_path(config, main_section),
                                     'bugwarrior.lockfile')
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues. This can take a while.
            issue_generator = aggregate_issues(config, main_section, debug)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        log.critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.' % (
                lockfile_path
            )
        )
    except RuntimeError as e:
        log.exception("Aborted (%s)" % e)
python
{ "resource": "" }
q269877
BitbucketService.get_data
test
def get_data(self, url):
    """
    Perform a request to the fully qualified url and return json.
    """
    return self.json_response(requests.get(url, **self.requests_kwargs))
python
{ "resource": "" }
q269878
BitbucketService.get_collection
test
def get_collection(self, url):
    """
    Pages through an object collection from the bitbucket API.

    Returns an iterator that lazily goes through all the 'values' of all
    the pages in the collection.
    """
    url = self.BASE_API2 + url
    while url is not None:
        response = self.get_data(url)
        for value in response['values']:
            yield value
        url = response.get('next', None)
python
{ "resource": "" }
q269879
find_local_uuid
test
def find_local_uuid(tw, keys, issue, legacy_matching=False):
    """
    For a given issue, find its local UUID.

    Assembles a list of task IDs existing in taskwarrior matching the
    supplied issue (`issue`) on the combination of any set of supplied
    unique identifiers (`keys`) or, optionally, the task's description
    field (should `legacy_matching` be `True`).

    :params:
    * `tw`: An instance of `taskw.TaskWarriorShellout`
    * `keys`: A list of lists of keys to use for uniquely identifying an
      issue. To clarify the "list of lists" behavior, assume that there
      are two services, one having a single primary key field --
      'serviceAid' -- and another having a pair of fields composing its
      primary key -- 'serviceBproject' and 'serviceBnumber' --, the
      incoming data for this field would be::

          [
              ['serviceAid'],
              ['serviceBproject', 'serviceBnumber'],
          ]

    * `issue`: An instance of a subclass of `bugwarrior.services.Issue`.
    * `legacy_matching`: By default, this is disabled, and it allows the
      matching algorithm to -- in addition to searching by stored issue
      keys -- search using the task's description for a match. It is
      prone to error and should be avoided if possible.

    :returns:
    * A single string UUID.

    :raises:
    * `bugwarrior.db.MultipleMatches`: if multiple matches were found.
    * `bugwarrior.db.NotFound`: if an issue was not found.
    """
    if not issue['description']:
        raise ValueError('Issue %s has no description.' % issue)

    possibilities = set([])

    if legacy_matching:
        legacy_description = issue.get_default_description().rsplit(
            '..', 1)[0]
        # Furthermore, we have to kill off any single quotes which break
        # in task-2.4.x, as much as it saddens me.
        legacy_description = legacy_description.split("'")[0]
        results = tw.filter_tasks({
            'description.startswith': legacy_description,
            'or': [
                ('status', 'pending'),
                ('status', 'waiting'),
            ],
        })
        possibilities = possibilities | set([
            task['uuid'] for task in results
        ])

    for service, key_list in six.iteritems(keys):
        if any([key in issue for key in key_list]):
            results = tw.filter_tasks({
                'and': [("%s.is" % key, issue[key]) for key in key_list],
                'or': [
                    ('status', 'pending'),
                    ('status', 'waiting'),
                ],
            })
            possibilities = possibilities | set([
                task['uuid'] for task in results
            ])

    if len(possibilities) == 1:
        return possibilities.pop()

    if len(possibilities) > 1:
        raise MultipleMatches(
            "Issue %s matched multiple IDs: %s" % (
                issue['description'],
                possibilities
            )
        )

    raise NotFound(
        "No issue was found matching %s" % issue
    )
python
{ "resource": "" }
q269880
merge_left
test
def merge_left(field, local_task, remote_issue, hamming=False):
    """
    Merge array field from the remote_issue into local_task.

    * Local 'left' entries are preserved without modification
    * Remote 'left' are appended to task if not present in local.

    :param `field`: Task field to merge.
    :param `local_task`: `taskw.task.Task` object into which to merge
        remote changes.
    :param `remote_issue`: `dict` instance from which to merge into
        local task.
    :param `hamming`: (default `False`) If `True`, compare entries by
        truncating to maximum length, and comparing hamming distances.
        Useful generally only for annotations.
    """
    # Ensure that empty defaults are present
    local_field = local_task.get(field, [])
    remote_field = remote_issue.get(field, [])

    # We need to make sure an array exists for this field because
    # we will be appending to it in a moment.
    if field not in local_task:
        local_task[field] = []

    # If a remote does not appear in local, add it to the local task
    new_count = 0
    for remote in remote_field:
        for local in local_field:
            if (
                # For annotations, they don't have to match *exactly*.
                (
                    hamming
                    and get_annotation_hamming_distance(remote, local) == 0
                )
                # But for everything else, they should.
                or (
                    remote == local
                )
            ):
                break
        else:
            log.debug("%s not found in %r" % (remote, local_field))
            local_task[field].append(remote)
            new_count += 1
    if new_count > 0:
        log.debug('Added %s new values to %s (total: %s)' % (
            new_count, field, len(local_task[field]),))
python
{ "resource": "" }
q269881
build_uda_config_overrides
test
def build_uda_config_overrides(targets):
    """
    Returns a list of UDAs defined by given targets.

    For all targets in `targets`, build a dictionary of configuration
    overrides representing the UDAs defined by the passed-in services
    (`targets`).

    Given a hypothetical situation in which you have two services, the
    first of which defining a UDA named 'serviceAid' ("Service A ID",
    string) and a second service defining two UDAs named 'serviceBproject'
    ("Service B Project", string) and 'serviceBnumber' ("Service B
    Number", numeric), this would return the following structure::

        {
            'uda': {
                'serviceAid': {
                    'label': 'Service A ID',
                    'type': 'string',
                },
                'serviceBproject': {
                    'label': 'Service B Project',
                    'type': 'string',
                },
                'serviceBnumber': {
                    'label': 'Service B Number',
                    'type': 'numeric',
                }
            }
        }
    """
    from bugwarrior.services import get_service

    targets_udas = {}
    for target in targets:
        targets_udas.update(get_service(target).ISSUE_CLASS.UDAS)
    return {
        'uda': targets_udas
    }
python
{ "resource": "" }
q269882
_parse_sprint_string
test
def _parse_sprint_string(sprint):
    """
    Parse the big ugly sprint string stored by JIRA.

    They look like:
        com.atlassian.greenhopper.service.sprint.Sprint@4c9c41a5[id=2322,rapid
        ViewId=1173,state=ACTIVE,name=Sprint 1,startDate=2016-09-06T16:08:07.4
        55Z,endDate=2016-09-23T16:08:00.000Z,completeDate=<null>,sequence=2322]
    """
    entries = sprint[sprint.index('[') + 1:sprint.index(']')].split('=')
    fields = sum((entry.rsplit(',', 1) for entry in entries), [])
    return dict(zip(fields[::2], fields[1::2]))
python
{ "resource": "" }
q269883
GmailService.get_credentials
test
def get_credentials(self):
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    with self.AUTHENTICATION_LOCK:
        log.info('Starting authentication for %s', self.target)
        store = oauth2client.file.Storage(self.credentials_path)
        credentials = store.get()
        if not credentials or credentials.invalid:
            log.info("No valid login. Starting OAUTH flow.")
            flow = oauth2client.client.flow_from_clientsecrets(
                self.client_secret_path, self.SCOPES)
            flow.user_agent = self.APPLICATION_NAME
            flags = oauth2client.tools.argparser.parse_args([])
            credentials = oauth2client.tools.run_flow(flow, store, flags)
            log.info('Storing credentials to %r', self.credentials_path)
        return credentials
python
{ "resource": "" }
q269884
multi_rouge_n
test
def multi_rouge_n(sequences, scores_ids, n=2):
    """
    Efficient way to compute highly repetitive scoring, i.e. when
    sequences are involved multiple times.

    Args:
        sequences(list[str]): list of sequences (either hyp or ref)
        scores_ids(list[tuple(int)]): list of pairs (hyp_id, ref_id),
            i.e. scores[i] = rouge_n(scores_ids[i][0], scores_ids[i][1])

    Returns:
        scores: list of length `len(scores_ids)` containing rouge `n`
            scores as a dict with 'f', 'r', 'p'

    Raises:
        KeyError: if there's a value of i in scores_ids that is not in
            [0, len(sequences)[
    """
    ngrams = [_get_word_ngrams(n, sequence) for sequence in sequences]
    counts = [len(ngram) for ngram in ngrams]

    scores = []
    for hyp_id, ref_id in scores_ids:
        evaluated_ngrams = ngrams[hyp_id]
        evaluated_count = counts[hyp_id]

        reference_ngrams = ngrams[ref_id]
        reference_count = counts[ref_id]

        overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
        overlapping_count = len(overlapping_ngrams)

        scores += [f_r_p_rouge_n(evaluated_count,
                                 reference_count, overlapping_count)]
    return scores
python
{ "resource": "" }
q269885
calc_pvalues
test
def calc_pvalues(query, gene_sets, background=20000, **kwargs):
    """Calculate pvalues for all categories in the graph.

    :param set query: set of identifiers for which the p value is
        calculated
    :param dict gene_sets: gmt file dict after background was set
    :param set background: total number of genes in your annotated
        database.
    :returns: pvalues
              x: overlapped gene number
              n: length of gene_set which belongs to each terms
              hits: overlapped gene names.

    For a 2*2 contingency table:

        ==============================================================
                           |  in query  | not in query |  row total
        =>    in gene_set  |     a      |      b       |     a+b
        => not in gene_set |     c      |      d       |     c+d
           column total    |         a+b+c+d = anno database
        ==============================================================

    background genes number = a + b + c + d.

    Then, in R:
        x = a, the number of white balls drawn without replacement from
            an urn which contains both black and white balls
        m = a+b, the number of white balls in the urn
        n = c+d, the number of black balls in the urn
        k = a+c, the number of balls drawn from the urn

    In Scipy, for hypergeom.sf(k, M, n, N, loc=0):
        M: the total number of objects
        n: the total number of Type I objects
        k: the random variate represents the number of Type I objects
           in N drawn without replacement from the total population

    Therefore, these two functions are the same when using parameters
    from the 2*2 table:

        R:     > phyper(x-1, m, n, k, lower.tail=FALSE)
        Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
    """
    # number of genes in your query data
    k = len(query)
    query = set(query)
    vals = []
    # background should be all genes in annotated database
    # such as go, kegg et.al.
    if isinstance(background, set):
        bg = len(background)  # total number in your annotated database
        # filter genes that not found in annotated database
        query = query.intersection(background)
    elif isinstance(background, int):
        bg = background
    else:
        raise ValueError("background should be set or int object")
    # pval
    subsets = sorted(gene_sets.keys())
    for s in subsets:
        category = gene_sets.get(s)
        m = len(category)
        hits = query.intersection(set(category))
        x = len(hits)
        if x < 1:
            continue
        # pVal = hypergeom.sf(hitCount-1, popTotal, bgHits, queryTotal)
        # p(X >= hitCounts)
        vals.append((s, hypergeom.sf(x - 1, bg, m, k), x, m, hits))
    return zip(*vals)
python
{ "resource": "" }
q269886
fdrcorrection
test
def fdrcorrection(pvals, alpha=0.05):
    """
    Benjamini-Hochberg fdr correction, inspired by statsmodels.
    """
    # Implement copy from GOATools.
    pvals = np.asarray(pvals)
    pvals_sortind = np.argsort(pvals)
    pvals_sorted = np.take(pvals, pvals_sortind)

    ecdffactor = _ecdf(pvals_sorted)
    reject = pvals_sorted <= ecdffactor * alpha
    if reject.any():
        rejectmax = max(np.nonzero(reject)[0])
        reject[:rejectmax] = True

    pvals_corrected_raw = pvals_sorted / ecdffactor
    pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
    del pvals_corrected_raw

    pvals_corrected[pvals_corrected > 1] = 1
    pvals_corrected_ = np.empty_like(pvals_corrected)
    pvals_corrected_[pvals_sortind] = pvals_corrected
    del pvals_corrected

    reject_ = np.empty_like(reject)
    reject_[pvals_sortind] = reject
    return reject_, pvals_corrected_
python
{ "resource": "" }
q269887
zscore
test
def zscore(data2d, axis=0):
    """Standardize the mean and variance of the data axis.

    :param data2d: DataFrame to normalize.
    :param axis: int, which axis to normalize across. If 0, normalize
        across rows; if 1, normalize across columns. If None, don't
        change data.
    :Returns: Normalized DataFrame. Normalized data with a mean of 0 and
        variance of 1 across the specified axis.
    """
    if axis is None:
        # normalized to mean and std using entire matrix
        # z_scored = (data2d - data2d.values.mean()) / data2d.values.std(ddof=1)
        return data2d
    assert axis in [0, 1]
    # if axis == 1:
    #     z_scored = data2d
    # else:
    #     z_scored = data2d.T
    # z_scored = (z_scored - z_scored.mean()) / z_scored.std(ddof=1)
    # if axis == 1:
    #     return z_scored
    # else:
    #     return z_scored.T
    z_scored = data2d.apply(
        lambda x: (x - x.mean()) / x.std(ddof=1),
        axis=operator.xor(1, axis))
    return z_scored
python
{ "resource": "" }
q269888
heatmap
test
def heatmap(df, z_score=None, title='', figsize=(5, 5), cmap='RdBu_r',
            xticklabels=True, yticklabels=True, ofname=None, **kwargs):
    """Visualize the dataframe.

    :param df: DataFrame from an expression table.
    :param z_score: z_score axis {0, 1}. If None, don't normalize the data.
    :param title: gene set name.
    :param figsize: heatmap figsize.
    :param cmap: matplotlib colormap.
    :param xticklabels: bool, whether to show x tick labels.
    :param yticklabels: bool, whether to show y tick labels.
    :param ofname: output file name. If None, don't save the figure.
    """
    df = zscore(df, axis=z_score)
    df = df.iloc[::-1]
    # get the positions and labels used for the ticks
    ny, nx = df.shape
    xticks = np.arange(0, nx, 1) + .5
    yticks = np.arange(0, ny, 1) + .5

    # use a pyplot figure only in an interactive session with no output file;
    # otherwise draw off-screen with the Agg canvas
    if hasattr(sys, 'ps1') and (ofname is None):
        fig = plt.figure(figsize=figsize)
    else:
        fig = Figure(figsize=figsize)
        canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111)
    vmin = np.percentile(df.min(), 2)
    vmax = np.percentile(df.max(), 98)
    matrix = ax.pcolormesh(df.values, cmap=cmap, vmin=vmin, vmax=vmax)
    ax.set_ylim([0, len(df)])
    ax.set(xticks=xticks, yticks=yticks)
    ax.set_xticklabels(df.columns.values if xticklabels else '',
                       fontsize=14, rotation=90)
    ax.set_yticklabels(df.index.values if yticklabels else '', fontsize=14)
    ax.set_title("%s\nHeatmap of the Analyzed Geneset" % title, fontsize=20)
    ax.tick_params(axis='both', which='both',
                   bottom=False, top=False, right=False, left=False)
    cbar = colorbar(matrix)
    cbar.ax.tick_params(axis='both', which='both',
                        bottom=False, top=False, right=False, left=False)
    for side in ["top", "right", "left", "bottom"]:
        ax.spines[side].set_visible(False)
        cbar.ax.spines[side].set_visible(False)
    if ofname is not None:
        fig.savefig(ofname, bbox_inches='tight', dpi=300)
    return
python
{ "resource": "" }
q269889
adjust_spines
test
def adjust_spines(ax, spines):
    """Remove unwanted spines and ticks.

    :param ax: axes object.
    :param spines: a list of spine names to keep,
        e.g. ['left', 'right', 'top', 'bottom'].
        If spines is empty, remove all spines and ticks.
    """
    for loc, spine in ax.spines.items():
        if loc in spines:
            continue
        else:
            spine.set_color('none')  # don't draw this spine

    # turn off ticks where there is no spine
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])  # no yaxis ticks

    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])  # no xaxis ticks
python
{ "resource": "" }
q269890
prepare_argparser
test
def prepare_argparser():
    """Prepare the argparser object. New options are added here first."""
    description = "%(prog)s -- Gene Set Enrichment Analysis in Python"
    epilog = "For command line options of each command, type: %(prog)s COMMAND -h"
    # top-level parser
    argparser = ap.ArgumentParser(description=description, epilog=epilog)
    argparser.add_argument("--version", action="version",
                           version="%(prog)s " + __version__)
    subparsers = argparser.add_subparsers(dest='subcommand_name')
    # command for 'gsea'
    add_gsea_parser(subparsers)
    # command for 'prerank'
    add_prerank_parser(subparsers)
    # command for 'ssgsea'
    add_singlesample_parser(subparsers)
    # command for 'replot'
    add_plot_parser(subparsers)
    # command for 'enrichr'
    add_enrichr_parser(subparsers)
    # command for 'biomart'
    add_biomart_parser(subparsers)
    return argparser
python
{ "resource": "" }
q269891
add_prerank_parser
test
def add_prerank_parser(subparsers):
    """Add the 'prerank' command argument parser."""
    argparser_prerank = subparsers.add_parser(
        "prerank", help="Run GSEApy Prerank tool on a preranked gene list.")

    # group for input files
    prerank_input = argparser_prerank.add_argument_group("Input files arguments")
    prerank_input.add_argument(
        "-r", "--rnk", dest="rnk", action="store", type=str, required=True,
        help="Ranking metric file in .rnk format. Same as GSEA.")
    prerank_input.add_argument(
        "-g", "--gmt", dest="gmt", action="store", type=str, required=True,
        help="Gene set database in GMT format. Same as GSEA.")
    prerank_input.add_argument(
        "-l", "--label", action='store', nargs=2, dest='label',
        metavar=('pos', 'neg'), type=str, default=('Pos', 'Neg'),
        help="The phenotype label argument needs two parameters. "
             "Default: ('Pos','Neg')")

    # group for output files
    prerank_output = argparser_prerank.add_argument_group("Output arguments")
    add_output_option(prerank_output)

    # group for general options
    prerank_opt = argparser_prerank.add_argument_group("GSEA advanced arguments")
    prerank_opt.add_argument(
        "-n", "--permu-num", dest="n", action="store", type=int, default=1000,
        metavar='nperm',
        help="Number of random permutations used to compute esnull. "
             "Default: 1000")
    prerank_opt.add_argument(
        "--min-size", dest="mins", action="store", type=int, default=15,
        metavar='int',
        help="Min size of input genes presented in Gene Sets. Default: 15")
    prerank_opt.add_argument(
        "--max-size", dest="maxs", action="store", type=int, default=500,
        metavar='int',
        help="Max size of input genes presented in Gene Sets. Default: 500")
    prerank_opt.add_argument(
        "-w", "--weight", action='store', dest='weight', default=1.0,
        type=float, metavar='float',
        help="Weighted score of the rank metrics, used for weighting input "
             "genes. Choose from {0, 1, 1.5, 2}. Default: 1")
    prerank_opt.add_argument(
        "-a", "--ascending", action='store_true', dest='ascending',
        default=False,
        help="Rank metric sorting order. If the -a flag is given, ascending "
             "is True. Default: False.")
    prerank_opt.add_argument(
        "-s", "--seed", dest="seed", action="store", type=int, default=None,
        metavar='',
        help="Random seed. Default: None")
    prerank_opt.add_argument(
        "-p", "--threads", dest="threads", action="store", type=int,
        default=1, metavar='procs',
        help="Number of processes to use. Default: 1")
    return
python
{ "resource": "" }
q269892
add_plot_parser
test
def add_plot_parser(subparsers):
    """Add the 'replot' command argument parser."""
    argparser_replot = subparsers.add_parser(
        "replot", help="Reproduce GSEA desktop output figures.")
    group_replot = argparser_replot.add_argument_group("Input arguments")
    group_replot.add_argument(
        "-i", "--indir", action="store", dest="indir", required=True,
        metavar='GSEA_dir',
        help="The GSEA desktop results directory from which you want to "
             "reproduce the figures.")
    add_output_option(group_replot)
    group_replot.add_argument(
        "-w", "--weight", action='store', dest='weight', default=1.0,
        type=float, metavar='float',
        help="Weighted score of the rank metrics. Please use the same value "
             "as in GSEA. Choose from {0, 1, 1.5, 2}. Default: 1")
    return
python
{ "resource": "" }
q269893
add_enrichr_parser
test
def add_enrichr_parser(subparsers):
    """Add the 'enrichr' command argument parser."""
    argparser_enrichr = subparsers.add_parser(
        "enrichr", help="Use the Enrichr API to perform GO analysis.")
    # group for required options
    enrichr_opt = argparser_enrichr.add_argument_group("Input arguments")
    enrichr_opt.add_argument(
        "-i", "--input-list", action="store", dest="gene_list", type=str,
        required=True, metavar='IDs',
        help="Enrichr uses a list of gene names as input.")
    enrichr_opt.add_argument(
        "-g", "--gene-sets", action="store", dest="library", type=str,
        required=True, metavar='GMT',
        help="Enrichr library name(s), required. Separate names by comma.")
    enrichr_opt.add_argument(
        "--org", "--organism", action="store", dest="organism", type=str,
        default='',
        help="Enrichr-supported organism name. Default: human. "
             "See here: https://amp.pharm.mssm.edu/modEnrichr.")
    enrichr_opt.add_argument(
        "--ds", "--description", action="store", dest="descrip", type=str,
        default='enrichr', metavar='STRING',
        help="A short description for your list, so that multiple lists can "
             "be differentiated from each other if you choose to save or "
             "share your list.")
    enrichr_opt.add_argument(
        "--cut", "--cut-off", action="store", dest="thresh",
        metavar='float', type=float, default=0.05,
        help="Adjusted p-value cutoff, used for generating plots. "
             "Default: 0.05.")
    enrichr_opt.add_argument(
        "--bg", "--background", action="store", dest="bg",
        default='hsapiens_gene_ensembl', metavar='BGNUM',
        help="BioMart dataset name or total background gene number. "
             "Default: hsapiens_gene_ensembl")
    enrichr_opt.add_argument(
        "-t", "--top-term", dest="term", action="store", type=int,
        default=10, metavar='int',
        help="Number of top terms shown in the plot. Default: 10")
    enrichr_output = argparser_enrichr.add_argument_group(
        "Output figure arguments")
    add_output_option(enrichr_output)
    return
python
{ "resource": "" }
q269894
enrichment_score
test
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1,
                     nperm=1000, rs=np.random.RandomState(), single=False,
                     scale=False):
    """This is the most important function of GSEApy. It implements the same
    algorithm as GSEA and ssGSEA.

    :param gene_list: The ordered gene list, e.g. rank_metric.index.values.
    :param correl_vector: A vector with the correlations (e.g. signal-to-noise
        scores) corresponding to the genes in the gene list, or rankings,
        e.g. rank_metric.values.
    :param gene_set: gene_sets in a gmt file; please use gsea_gmt_parser to
        get the gene_set.
    :param weighted_score_type: Same as GSEA's weighted score method.
        Weighting by the correlation is a very reasonable choice that allows
        significant gene sets with less than perfect coherence.
        Options: 0 (classic), 1, 1.5, 2. Default: 1.
        If one is interested in penalizing sets for lack of coherence, or in
        discovering sets with any type of nonrandom distribution of tags, a
        value p < 1 might be appropriate. On the other hand, if one uses sets
        with a large number of genes and only a small subset of those is
        expected to be coherent, then one could consider using p > 1.
        Our recommendation is to use p = 1 and use other settings only if
        you are very experienced with the method and its behavior.
    :param nperm: Only use this parameter when computing esnull for
        statistical testing. Set it to the permutation number.
    :param rs: Random state for initializing gene list shuffling.
        Default: np.random.RandomState(seed=None)
    :return:
        ES: Enrichment score (real number between -1 and +1).
        ESNULL: Enrichment scores calculated from random permutations.
        Hits_Indices: Indices of the genes in gene_list that are in gene_set.
        RES: Numerical vector containing the running enrichment score for
             all locations in the gene list.
    """
    N = len(gene_list)
    # test whether each element of a 1-D array is also present in a second
    # array; more intuitive here than the original enrichment_score code.
    # use .astype to convert bool to integer
    tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int)
    # the sign is 0 (no tag) or 1 (tag)
    if weighted_score_type == 0:
        correl_vector = np.repeat(1, N)
    else:
        correl_vector = np.abs(correl_vector) ** weighted_score_type

    # get indices of tag_indicator
    hit_ind = np.flatnonzero(tag_indicator).tolist()
    # if used to compute esnull, nperm equals the permutation number,
    # e.g. 1000; otherwise just compute the enrichment score.
    # set axis to 1, because we have a 2D array
    axis = 1
    tag_indicator = np.tile(tag_indicator, (nperm + 1, 1))
    correl_vector = np.tile(correl_vector, (nperm + 1, 1))
    # gene list permutation: shuffle every row except the last one
    for i in range(nperm):
        rs.shuffle(tag_indicator[i])

    Nhint = tag_indicator.sum(axis=axis, keepdims=True)
    sum_correl_tag = np.sum(correl_vector * tag_indicator,
                            axis=axis, keepdims=True)
    # compute the ES score; the code below is identical to the GSEA
    # enrichment_score method
    no_tag_indicator = 1 - tag_indicator
    Nmiss = N - Nhint
    norm_tag = 1.0 / sum_correl_tag
    norm_no_tag = 1.0 / Nmiss

    RES = np.cumsum(tag_indicator * correl_vector * norm_tag -
                    no_tag_indicator * norm_no_tag, axis=axis)

    if scale:
        RES = RES / N
    if single:
        es_vec = RES.sum(axis=axis)
    else:
        max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
        es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
    # extract values: the last row corresponds to the unshuffled gene list
    es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1, :]

    return es, esnull, hit_ind, RES
python
{ "resource": "" }
q269895
ranking_metric_tensor
test
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
                          ascending, rs=np.random.RandomState()):
    """Build the shuffled ranking matrix when permutation_type equals
    phenotype.

    :param exprs: gene_expression DataFrame, indexed by gene name.
    :param str method: correlation or ranking statistic. One of:

        1. 'signal_to_noise'
        2. 't_test'
        3. 'ratio_of_classes' (also referred to as fold change)
        4. 'diff_of_classes'
        5. 'log2_ratio_of_classes'

    :param int permutation_num: how many times the class labels are shuffled.
    :param str pos: name of the positive phenotype label.
    :param str neg: name of the negative phenotype label.
    :param list classes: a list of phenotype labels, specifying which class
        of phenotype each column of the dataframe belongs to.
    :param bool ascending: sort ascending vs. descending.
    :return: two 2d ndarrays with shape (nperm+1, gene_num).

        | cor_mat_indices: the indices of the sorted and permuted (exclude
          last row) ranking matrix.
        | cor_mat: the sorted and permuted (exclude last row) ranking matrix.
    """
    # S: sample number, G: gene number
    G, S = exprs.shape
    expr_mat = exprs.values.T
    perm_cor_tensor = np.tile(expr_mat, (permutation_num + 1, 1, 1))
    # random shuffle along the first dim; the last matrix is left unshuffled
    for arr in perm_cor_tensor[:-1]:
        rs.shuffle(arr)
    classes = np.array(classes)
    pos = classes == pos
    neg = classes == neg
    pos_cor_mean = perm_cor_tensor[:, pos, :].mean(axis=1)
    neg_cor_mean = perm_cor_tensor[:, neg, :].mean(axis=1)
    pos_cor_std = perm_cor_tensor[:, pos, :].std(axis=1, ddof=1)
    neg_cor_std = perm_cor_tensor[:, neg, :].std(axis=1, ddof=1)

    if method == 'signal_to_noise':
        cor_mat = (pos_cor_mean - neg_cor_mean) / (pos_cor_std + neg_cor_std)
    elif method == 't_test':
        # use the per-class sample sizes in the pooled standard error
        n_pos, n_neg = pos.sum(), neg.sum()
        cor_mat = (pos_cor_mean - neg_cor_mean) / np.sqrt(
            pos_cor_std ** 2 / n_pos + neg_cor_std ** 2 / n_neg)
    elif method == 'ratio_of_classes':
        cor_mat = pos_cor_mean / neg_cor_mean
    elif method == 'diff_of_classes':
        cor_mat = pos_cor_mean - neg_cor_mean
    elif method == 'log2_ratio_of_classes':
        cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
    else:
        logging.error("Please provide a correct method name!")
        sys.exit(0)
    # cor_mat has shape (nperm+1, gene_num)
    cor_mat_ind = cor_mat.argsort()
    # ndarray: sort in place
    cor_mat.sort()
    if ascending:
        return cor_mat_ind, cor_mat
    # descending order of rankings and genes
    return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
python
{ "resource": "" }
q269896
ranking_metric
test
def ranking_metric(df, method, pos, neg, classes, ascending):
    """The main function to rank an expression table.

    :param df: gene_expression DataFrame.
    :param method: The method used to calculate a correlation or ranking.
        Default: 'log2_ratio_of_classes'. The methods are:

        1. 'signal_to_noise'
           You must have at least three samples for each phenotype to use
           this metric. The larger the signal-to-noise ratio, the larger
           the differences of the means (scaled by the standard
           deviations); that is, the more distinct the gene expression is
           in each phenotype and the more the gene acts as a
           “class marker.”

        2. 't_test'
           Uses the difference of means scaled by the standard deviation
           and number of samples. Note: You must have at least three
           samples for each phenotype to use this metric. The larger the
           t-test ratio, the more distinct the gene expression is in each
           phenotype and the more the gene acts as a “class marker.”

        3. 'ratio_of_classes' (also referred to as fold change)
           Uses the ratio of class means to calculate fold change for
           natural scale data.

        4. 'diff_of_classes'
           Uses the difference of class means to calculate fold change for
           natural scale data.

        5. 'log2_ratio_of_classes'
           Uses the log2 ratio of class means to calculate fold change for
           natural scale data. This is the recommended statistic for
           calculating fold change for log scale data.

    :param str pos: name of the positive phenotype label.
    :param str neg: name of the negative phenotype label.
    :param list classes: a list of phenotype labels, specifying which
        category of phenotype each column of the dataframe belongs to.
    :param bool ascending: bool or list of bool. Sort ascending vs.
        descending.
    :return: a pd.Series of the correlation to class of each variable.
        Gene names are the index, values are the rankings.

    Visit here for more docs:
    http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
    """
    # per-class means and standard deviations across samples
    df_mean = df.groupby(by=classes, axis=1).mean()
    df_std = df.groupby(by=classes, axis=1).std()
    if method == 'signal_to_noise':
        ser = (df_mean[pos] - df_mean[neg]) / (df_std[pos] + df_std[neg])
    elif method == 't_test':
        # use the per-class sample sizes in the pooled standard error
        n_pos = np.sum(np.asarray(classes) == pos)
        n_neg = np.sum(np.asarray(classes) == neg)
        ser = (df_mean[pos] - df_mean[neg]) / np.sqrt(
            df_std[pos] ** 2 / n_pos + df_std[neg] ** 2 / n_neg)
    elif method == 'ratio_of_classes':
        ser = df_mean[pos] / df_mean[neg]
    elif method == 'diff_of_classes':
        ser = df_mean[pos] - df_mean[neg]
    elif method == 'log2_ratio_of_classes':
        ser = np.log2(df_mean[pos] / df_mean[neg])
    else:
        logging.error("Please provide a correct method name!")
        sys.exit(0)
    ser = ser.sort_values(ascending=ascending)
    return ser
python
{ "resource": "" }
q269897
gsea_pval
test
def gsea_pval(es, esnull):
    """Compute the nominal p-value.

    From the article (PNAS): estimate the nominal p-value for S from esnull
    by using the positive or negative portion of the distribution
    corresponding to the sign of the observed ES(S).
    """
    # to speed up, use numpy functions to compute the p-values in parallel
    condlist = [es < 0, es >= 0]
    choicelist = [
        np.sum(esnull < es.reshape(len(es), 1), axis=1) /
        np.sum(esnull < 0, axis=1),
        np.sum(esnull >= es.reshape(len(es), 1), axis=1) /
        np.sum(esnull >= 0, axis=1),
    ]
    pval = np.select(condlist, choicelist)
    return pval
python
{ "resource": "" }
q269898
gsea_significance
test
def gsea_significance(enrichment_scores, enrichment_nulls):
    """Compute nominal p-values, normalized ES, and FDR q-values.

    For a given NES(S) = NES* >= 0, the FDR is the ratio of the percentage
    of all (S, pi) with NES(S, pi) >= 0 whose NES(S, pi) >= NES*, divided
    by the percentage of observed S with NES(S) >= 0 whose NES(S) >= NES*;
    and similarly if NES(S) = NES* <= 0.
    """
    # a zero-by-zero division (undetermined) results in a NaN
    np.seterr(divide='ignore', invalid='ignore')
    es = np.array(enrichment_scores)
    esnull = np.array(enrichment_nulls)
    logging.debug("Start to compute pvals..................................")
    # compute nominal p-values
    enrichmentPVals = gsea_pval(es, esnull).tolist()
    logging.debug("Compute nes and nesnull.................................")
    # new normalized enrichment score implementation:
    # normalize by the mean of the positive or negative portion of the
    # null distribution. This speeds things up significantly.
    esnull_pos = (esnull * (esnull >= 0)).mean(axis=1)
    esnull_neg = (esnull * (esnull < 0)).mean(axis=1)
    nEnrichmentScores = np.where(es >= 0, es / esnull_pos, -es / esnull_neg)
    nEnrichmentNulls = np.where(esnull >= 0,
                                esnull / esnull_pos[:, np.newaxis],
                                -esnull / esnull_neg[:, np.newaxis])
    logging.debug("Start to compute fdrs..................................")
    # FDR null distribution: pool all NES(S, pi) over all S and pi, and use
    # this null distribution to compute an FDR q-value
    nvals = np.sort(nEnrichmentNulls.flatten())
    nnes = np.sort(nEnrichmentScores)
    fdrs = []
    # FDR computation
    for i in range(len(enrichment_scores)):
        nes = nEnrichmentScores[i]
        # use the same sign convention as the p-value computation
        if nes >= 0:
            allPos = int(len(nvals) - np.searchsorted(nvals, 0, side="left"))
            allHigherAndPos = int(len(nvals) -
                                  np.searchsorted(nvals, nes, side="left"))
            nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side="left"))
            nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes,
                                                              side="left"))
        else:
            allPos = int(np.searchsorted(nvals, 0, side="left"))
            allHigherAndPos = int(np.searchsorted(nvals, nes, side="right"))
            nesPos = int(np.searchsorted(nnes, 0, side="left"))
            nesHigherAndPos = int(np.searchsorted(nnes, nes, side="right"))
        try:
            pi_norm = allHigherAndPos / float(allPos)
            pi_obs = nesHigherAndPos / float(nesPos)
            fdr = pi_norm / pi_obs
            fdrs.append(fdr if fdr < 1 else 1.0)
        except ZeroDivisionError:
            fdrs.append(1000000000.0)
    logging.debug("Statistical testing finished.............................")
    return zip(enrichment_scores, nEnrichmentScores, enrichmentPVals, fdrs)
python
{ "resource": "" }
q269899
Biomart.get_marts
test
def get_marts(self):
    """Get available marts and their names."""
    mart_names = pd.Series(self.names, name="Name")
    mart_descriptions = pd.Series(self.displayNames, name="Description")
    return pd.concat([mart_names, mart_descriptions], axis=1)
python
{ "resource": "" }