code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def construct_for(self, service_name):
    """
    Builds a new, specialized ``Connection`` subclass for a given service.

    This will introspect a service, determine all the API calls it has &
    constructs a brand new class with those methods on it.

    :param service_name: The name of the service to construct a connection
        for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
    :type service_name: string

    :returns: A new connection class for that service
    """
    # Gather the relevant metadata about the service & its operations.
    details = self.details_class(service_name, self.session)

    # Seed the class namespace with that ``ConnectionDetails`` instance
    # (exposed as ``cls._details``), then layer on the dynamically
    # generated API methods.
    namespace = {'_details': details}
    namespace.update(self._build_methods(details))

    # Dynamically create & return the new ``Connection`` subclass.
    return type(
        self._build_class_name(service_name),
        (self.base_connection,),
        namespace
    )
Builds a new, specialized ``Connection`` subclass for a given service. This will introspect a service, determine all the API calls it has & constructs a brand new class with those methods on it. :param service_name: The name of the service to construct a connection for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :returns: A new connection class for that service
Below is the the instruction that describes the task: ### Input: Builds a new, specialized ``Connection`` subclass for a given service. This will introspect a service, determine all the API calls it has & constructs a brand new class with those methods on it. :param service_name: The name of the service to construct a connection for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :returns: A new connection class for that service ### Response: def construct_for(self, service_name): """ Builds a new, specialized ``Connection`` subclass for a given service. This will introspect a service, determine all the API calls it has & constructs a brand new class with those methods on it. :param service_name: The name of the service to construct a connection for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :returns: A new connection class for that service """ # Construct a new ``ConnectionDetails`` (or similar class) for storing # the relevant details about the service & its operations. details = self.details_class(service_name, self.session) # Make sure the new class gets that ``ConnectionDetails`` instance as a # ``cls._details`` attribute. attrs = { '_details': details, } # Determine what we should call it. klass_name = self._build_class_name(service_name) # Construct what the class ought to have on it. attrs.update(self._build_methods(details)) # Create the class. return type( klass_name, (self.base_connection,), attrs )
def walk(fn, obj, *args, **kwargs):
    """Recursively walk an object graph applying `fn`/`args` to objects.

    Lists and tuples are walked element-wise (both come back as lists);
    dicts are walked over both their keys and their values.  Anything else
    is treated as a leaf and passed to ``fn`` along with ``args``/``kwargs``.
    """
    # Exact type checks (not isinstance) are deliberate: subclasses of
    # list/tuple/dict are treated as leaves, preserving prior behavior.
    if type(obj) in (list, tuple):
        # BUG FIX: propagate **kwargs on recursion; previously keyword
        # arguments were silently dropped for every nested object.
        return [walk(fn, o, *args, **kwargs) for o in obj]
    if type(obj) is dict:
        return {walk(fn, k, *args, **kwargs): walk(fn, v, *args, **kwargs)
                for k, v in obj.items()}
    return fn(obj, *args, **kwargs)
Recursively walk an object graph applying `fn`/`args` to objects.
Below is the the instruction that describes the task: ### Input: Recursively walk an object graph applying `fn`/`args` to objects. ### Response: def walk(fn, obj, *args, **kwargs): """Recursively walk an object graph applying `fn`/`args` to objects.""" if type(obj) in [list, tuple]: return list(walk(fn, o, *args) for o in obj) if type(obj) is dict: return dict((walk(fn, k, *args), walk(fn, v, *args)) for k, v in obj.items()) return fn(obj, *args, **kwargs)
def cache_file(symbol, func, has_date, root, date_type='date'):
    """
    Data file

    Args:
        symbol: symbol
        func: use function to categorize data
        has_date: contains date in data file
        root: root path
        date_type: parameters pass to utils.cur_time, [date, time, time_path, ...]

    Returns:
        str: date file
    """
    # Configuration lives on the module that defines `func`; fall back to
    # sensible defaults when the module does not declare the attribute.
    mod = sys.modules[func.__module__]
    tz = getattr(mod, 'DATA_TZ', 'UTC')
    cur_dt = utils.cur_time(typ=date_type, tz=tz, trading=False)

    # The module may override the file layout for dated / undated files.
    if has_date:
        file_fmt = getattr(mod, 'FILE_WITH_DATE', '{root}/{typ}/{symbol}/{cur_dt}.parq')
    else:
        file_fmt = getattr(mod, 'FILE_NO_DATE', '{root}/{typ}/{symbol}.parq')

    return data_file(
        file_fmt=file_fmt,
        root=root,
        cur_dt=cur_dt,
        typ=func.__name__,
        symbol=symbol,
    )
Data file Args: symbol: symbol func: use function to categorize data has_date: contains date in data file root: root path date_type: parameters pass to utils.cur_time, [date, time, time_path, ...] Returns: str: date file
Below is the the instruction that describes the task: ### Input: Data file Args: symbol: symbol func: use function to categorize data has_date: contains date in data file root: root path date_type: parameters pass to utils.cur_time, [date, time, time_path, ...] Returns: str: date file ### Response: def cache_file(symbol, func, has_date, root, date_type='date'): """ Data file Args: symbol: symbol func: use function to categorize data has_date: contains date in data file root: root path date_type: parameters pass to utils.cur_time, [date, time, time_path, ...] Returns: str: date file """ cur_mod = sys.modules[func.__module__] data_tz = getattr(cur_mod, 'DATA_TZ') if hasattr(cur_mod, 'DATA_TZ') else 'UTC' cur_dt = utils.cur_time(typ=date_type, tz=data_tz, trading=False) if has_date: if hasattr(cur_mod, 'FILE_WITH_DATE'): file_fmt = getattr(cur_mod, 'FILE_WITH_DATE') else: file_fmt = '{root}/{typ}/{symbol}/{cur_dt}.parq' else: if hasattr(cur_mod, 'FILE_NO_DATE'): file_fmt = getattr(cur_mod, 'FILE_NO_DATE') else: file_fmt = '{root}/{typ}/{symbol}.parq' return data_file( file_fmt=file_fmt, root=root, cur_dt=cur_dt, typ=func.__name__, symbol=symbol )
def add_members(self, project, params=None, **options):
    """Adds the specified list of users as members of the project. Returns the updated project record.

    Parameters
    ----------
    project : {Id} The project to add members to.
    [data] : {Object} Data for the request
      - members : {Array} An array of members to add to the project.
    """
    # BUG FIX: use None as the default instead of a mutable `{}` shared
    # across all calls (classic mutable-default-argument pitfall).
    if params is None:
        params = {}
    path = "/projects/%s/addMembers" % (project)
    return self.client.post(path, params, **options)
Adds the specified list of users as members of the project. Returns the updated project record. Parameters ---------- project : {Id} The project to add members to. [data] : {Object} Data for the request - members : {Array} An array of members to add to the project.
Below is the the instruction that describes the task: ### Input: Adds the specified list of users as members of the project. Returns the updated project record. Parameters ---------- project : {Id} The project to add members to. [data] : {Object} Data for the request - members : {Array} An array of members to add to the project. ### Response: def add_members(self, project, params={}, **options): """Adds the specified list of users as members of the project. Returns the updated project record. Parameters ---------- project : {Id} The project to add members to. [data] : {Object} Data for the request - members : {Array} An array of members to add to the project. """ path = "/projects/%s/addMembers" % (project) return self.client.post(path, params, **options)
def prt_hier_down(self, goid, prt=sys.stdout):
    """Write hierarchy for all GO IDs below GO ID in arg, goid."""
    # Build a hierarchy writer over this sub-DAG and recurse from `goid`.
    writer = WrHierPrt(
        self.gosubdag.go2obj,
        self.gosubdag.go2nt,
        self._get_wrhiercfg(),
        prt,
    )
    writer.prt_hier_rec(goid)
    return writer.items_list
Write hierarchy for all GO IDs below GO ID in arg, goid.
Below is the the instruction that describes the task: ### Input: Write hierarchy for all GO IDs below GO ID in arg, goid. ### Response: def prt_hier_down(self, goid, prt=sys.stdout): """Write hierarchy for all GO IDs below GO ID in arg, goid.""" wrhiercfg = self._get_wrhiercfg() obj = WrHierPrt(self.gosubdag.go2obj, self.gosubdag.go2nt, wrhiercfg, prt) obj.prt_hier_rec(goid) return obj.items_list
def ensure_number(num, *args, **kwargs):
    """
    Returns *num* again if it is an instance of :py:class:`Number`, or uses
    all passed arguments to create one and returns it.
    """
    # Pass through existing Number instances untouched; otherwise coerce.
    if isinstance(num, Number):
        return num
    return Number(num, *args, **kwargs)
Returns *num* again if it is an instance of :py:class:`Number`, or uses all passed arguments to create one and returns it.
Below is the the instruction that describes the task: ### Input: Returns *num* again if it is an instance of :py:class:`Number`, or uses all passed arguments to create one and returns it. ### Response: def ensure_number(num, *args, **kwargs): """ Returns *num* again if it is an instance of :py:class:`Number`, or uses all passed arguments to create one and returns it. """ return num if isinstance(num, Number) else Number(num, *args, **kwargs)
def sign_cert(self, ca, csr, expires=None, algorithm=None, subject=None, cn_in_san=True,
              csr_format=Encoding.PEM, subject_alternative_name=None, key_usage=None,
              extended_key_usage=None, tls_feature=None, ocsp_no_check=False,
              extra_extensions=None, password=None):
    """Create a signed certificate from a CSR.

    **PLEASE NOTE:** This function creates the raw certificate and is usually not invoked directly. It is
    called by :py:func:`Certificate.objects.init() <django_ca.managers.CertificateManager.init>`, which
    passes along all parameters unchanged and saves the raw certificate to the database.

    Parameters
    ----------
    ca : :py:class:`~django_ca.models.CertificateAuthority`
        The certificate authority to sign the certificate with.
    csr : str
        A valid CSR. The format is given by the ``csr_format`` parameter.
    expires : datetime, optional
        Datetime for when this certificate will expire, defaults to the ``CA_DEFAULT_EXPIRES``
        setting.
    algorithm : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional
        Hash algorithm used when signing the certificate, passed to
        :py:func:`~django_ca.utils.parse_hash_algorithm`. The default is the value of the
        :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>` setting.
    subject : dict or str or :py:class:`~django_ca.subject.Subject`
        Subject string, e.g. ``"/CN=example.com"`` or ``Subject("/CN=example.com")``. The value is
        actually passed to :py:class:`~django_ca.subject.Subject` if it is not already an instance of
        that class. If this value is not passed or if the value does not contain a CommonName, the
        first value of the ``subject_alternative_name`` parameter is used as CommonName.
    cn_in_san : bool, optional
        Whether the CommonName should also be included as subjectAlternativeName. The default is
        ``True``, but the parameter is ignored if no CommonName is given. This is typically set to
        ``False`` when creating a client certificate, where the subjects CommonName has no meaningful
        value as subjectAlternativeName.
    csr_format : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding`, optional
        The format of the CSR. The default is ``PEM``.
    subject_alternative_name : list of str or :py:class:`~django_ca.extensions.SubjectAlternativeName`, optional
        A list of alternative names for the certificate. The value is passed to
        :py:class:`~django_ca.extensions.SubjectAlternativeName` if not already an instance of that
        class.
    key_usage : str or dict or :py:class:`~django_ca.extensions.KeyUsage`, optional
        Value for the ``keyUsage`` X509 extension. The value is passed to
        :py:class:`~django_ca.extensions.KeyUsage` if not already an instance of that class.
    extended_key_usage : str or dict or :py:class:`~django_ca.extensions.ExtendedKeyUsage`, optional
        Value for the ``extendedKeyUsage`` X509 extension. The value is passed to
        :py:class:`~django_ca.extensions.ExtendedKeyUsage` if not already an instance of that class.
    tls_feature : str or dict or :py:class:`~django_ca.extensions.TLSFeature`, optional
        Value for the ``TLSFeature`` X509 extension. The value is passed to
        :py:class:`~django_ca.extensions.TLSFeature` if not already an instance of that class.
    ocsp_no_check : bool, optional
        Add the OCSPNoCheck flag, indicating that an OCSP client should trust this certificate for
        its lifetime. This value only makes sense if you intend to use the certificate for an OCSP
        responder, the default is ``False``. See `RFC 6960, section 4.2.2.2.1
        <https://tools.ietf.org/html/rfc6960#section-4.2.2.2>`_ for more information.
    extra_extensions : list of :py:class:`cg:cryptography.x509.Extension` or \
            :py:class:`django_ca.extensions.Extension`, optional
        An optional list of additional extensions to add to the certificate.
    password : bytes, optional
        Password used to load the private key of the certificate authority. If not passed, the
        private key is assumed to be unencrypted.

    Returns
    -------
    cryptography.x509.Certificate
        The signed certificate.
    """
    ########################
    # Normalize parameters #
    ########################
    if subject is None:
        subject = Subject()  # we need a subject instance so we can possibly add the CN
    elif not isinstance(subject, Subject):
        subject = Subject(subject)

    if 'CN' not in subject and not subject_alternative_name:
        raise ValueError("Must name at least a CN or a subjectAlternativeName.")

    algorithm = parse_hash_algorithm(algorithm)

    # Normalize extensions to django_ca.extensions.Extension subclasses
    if key_usage and not isinstance(key_usage, KeyUsage):
        key_usage = KeyUsage(key_usage)
    if extended_key_usage and not isinstance(extended_key_usage, ExtendedKeyUsage):
        extended_key_usage = ExtendedKeyUsage(extended_key_usage)
    if tls_feature and not isinstance(tls_feature, TLSFeature):
        tls_feature = TLSFeature(tls_feature)

    if not subject_alternative_name:
        subject_alternative_name = SubjectAlternativeName([])
    elif not isinstance(subject_alternative_name, SubjectAlternativeName):
        subject_alternative_name = SubjectAlternativeName(subject_alternative_name)

    # use first SAN as CN if CN is not set
    if 'CN' not in subject:
        subject['CN'] = subject_alternative_name.value[0].value
    elif cn_in_san and 'CN' in subject:  # add CN to SAN if cn_in_san is True (default)
        try:
            cn_name = parse_general_name(subject['CN'])
        except idna.IDNAError:
            raise ValueError('%s: Could not parse CommonName as subjectAlternativeName.'
                             % subject['CN'])
        else:
            if cn_name not in subject_alternative_name:
                subject_alternative_name.insert(0, cn_name)

    ################
    # Read the CSR #
    ################
    if csr_format == Encoding.PEM:
        req = x509.load_pem_x509_csr(force_bytes(csr), default_backend())
    elif csr_format == Encoding.DER:
        req = x509.load_der_x509_csr(force_bytes(csr), default_backend())
    else:
        raise ValueError('Unknown CSR format passed: %s' % csr_format)

    #########################
    # Send pre-issue signal #
    #########################
    # BUG FIX: the keyword was previously misspelled ``tls_featur``, so
    # signal receivers never saw the TLSFeature value under the documented
    # name. NOTE(review): any receiver that bound to the misspelled kwarg
    # must be updated alongside this fix.
    pre_issue_cert.send(sender=self.model, ca=ca, csr=csr, expires=expires, algorithm=algorithm,
                        subject=subject, cn_in_san=cn_in_san, csr_format=csr_format,
                        subject_alternative_name=subject_alternative_name, key_usage=key_usage,
                        extended_key_usage=extended_key_usage, tls_feature=tls_feature,
                        extra_extensions=extra_extensions, password=password)

    #######################
    # Generate public key #
    #######################
    public_key = req.public_key()

    builder = get_cert_builder(expires)
    builder = builder.public_key(public_key)
    builder = builder.issuer_name(ca.x509.subject)
    builder = builder.subject_name(subject.name)

    # Add extensions
    builder = builder.add_extension(x509.BasicConstraints(ca=False, path_length=None),
                                    critical=True)
    builder = builder.add_extension(x509.SubjectKeyIdentifier.from_public_key(public_key),
                                    critical=False)

    # Get authorityKeyIdentifier from subjectKeyIdentifier from signing CA
    builder = builder.add_extension(ca.get_authority_key_identifier(), critical=False)

    for critical, ext in self.get_common_extensions(ca.issuer_url, ca.crl_url, ca.ocsp_url):
        builder = builder.add_extension(ext, critical=critical)

    if subject_alternative_name:
        builder = builder.add_extension(**subject_alternative_name.for_builder())
    if key_usage:
        builder = builder.add_extension(**key_usage.for_builder())
    if extended_key_usage:
        builder = builder.add_extension(**extended_key_usage.for_builder())
    if tls_feature:
        builder = builder.add_extension(**tls_feature.for_builder())
    if ca.issuer_alt_name:
        issuer_alt_name = IssuerAlternativeName(ca.issuer_alt_name)
        builder = builder.add_extension(**issuer_alt_name.for_builder())
    if ocsp_no_check:
        builder = builder.add_extension(**OCSPNoCheck().for_builder())
    if extra_extensions:
        builder = self._extra_extensions(builder, extra_extensions)

    ###################
    # Sign public key #
    ###################
    cert = builder.sign(private_key=ca.key(password), algorithm=algorithm,
                        backend=default_backend())

    return cert, req
Create a signed certificate from a CSR. **PLEASE NOTE:** This function creates the raw certificate and is usually not invoked directly. It is called by :py:func:`Certificate.objects.init() <django_ca.managers.CertificateManager.init>`, which passes along all parameters unchanged and saves the raw certificate to the database. Parameters ---------- ca : :py:class:`~django_ca.models.CertificateAuthority` The certificate authority to sign the certificate with. csr : str A valid CSR. The format is given by the ``csr_format`` parameter. expires : datetime, optional Datetime for when this certificate will expire, defaults to the ``CA_DEFAULT_EXPIRES`` setting. algorithm : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional Hash algorithm used when signing the certificate, passed to :py:func:`~django_ca.utils.parse_hash_algorithm`. The default is the value of the :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>` setting. subject : dict or str or :py:class:`~django_ca.subject.Subject` Subject string, e.g. ``"/CN=example.com"`` or ``Subject("/CN=example.com")``. The value is actually passed to :py:class:`~django_ca.subject.Subject` if it is not already an instance of that class. If this value is not passed or if the value does not contain a CommonName, the first value of the ``subject_alternative_name`` parameter is used as CommonName. cn_in_san : bool, optional Wether the CommonName should also be included as subjectAlternativeName. The default is ``True``, but the parameter is ignored if no CommonName is given. This is typically set to ``False`` when creating a client certificate, where the subjects CommonName has no meaningful value as subjectAlternativeName. csr_format : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding`, optional The format of the CSR. The default is ``PEM``. 
subject_alternative_name : list of str or :py:class:`~django_ca.extensions.SubjectAlternativeName`, optional A list of alternative names for the certificate. The value is passed to :py:class:`~django_ca.extensions.SubjectAlternativeName` if not already an instance of that class. key_usage : str or dict or :py:class:`~django_ca.extensions.KeyUsage`, optional Value for the ``keyUsage`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.KeyUsage` if not already an instance of that class. extended_key_usage : str or dict or :py:class:`~django_ca.extensions.ExtendedKeyUsage`, optional Value for the ``extendedKeyUsage`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.ExtendedKeyUsage` if not already an instance of that class. tls_feature : str or dict or :py:class:`~django_ca.extensions.TLSFeature`, optional Value for the ``TLSFeature`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.TLSFeature` if not already an instance of that class. ocsp_no_check : bool, optional Add the OCSPNoCheck flag, indicating that an OCSP client should trust this certificate for it's lifetime. This value only makes sense if you intend to use the certificate for an OCSP responder, the default is ``False``. See `RFC 6990, section 4.2.2.2.1 <https://tools.ietf.org/html/rfc6960#section-4.2.2.2>`_ for more information. extra_extensions : list of :py:class:`cg:cryptography.x509.Extension` or \ :py:class:`django_ca.extensions.Extension`, optional An optional list of additional extensions to add to the certificate. password : bytes, optional Password used to load the private key of the certificate authority. If not passed, the private key is assumed to be unencrypted. Returns ------- cryptography.x509.Certificate The signed certificate.
Below is the the instruction that describes the task: ### Input: Create a signed certificate from a CSR. **PLEASE NOTE:** This function creates the raw certificate and is usually not invoked directly. It is called by :py:func:`Certificate.objects.init() <django_ca.managers.CertificateManager.init>`, which passes along all parameters unchanged and saves the raw certificate to the database. Parameters ---------- ca : :py:class:`~django_ca.models.CertificateAuthority` The certificate authority to sign the certificate with. csr : str A valid CSR. The format is given by the ``csr_format`` parameter. expires : datetime, optional Datetime for when this certificate will expire, defaults to the ``CA_DEFAULT_EXPIRES`` setting. algorithm : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional Hash algorithm used when signing the certificate, passed to :py:func:`~django_ca.utils.parse_hash_algorithm`. The default is the value of the :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>` setting. subject : dict or str or :py:class:`~django_ca.subject.Subject` Subject string, e.g. ``"/CN=example.com"`` or ``Subject("/CN=example.com")``. The value is actually passed to :py:class:`~django_ca.subject.Subject` if it is not already an instance of that class. If this value is not passed or if the value does not contain a CommonName, the first value of the ``subject_alternative_name`` parameter is used as CommonName. cn_in_san : bool, optional Wether the CommonName should also be included as subjectAlternativeName. The default is ``True``, but the parameter is ignored if no CommonName is given. This is typically set to ``False`` when creating a client certificate, where the subjects CommonName has no meaningful value as subjectAlternativeName. csr_format : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding`, optional The format of the CSR. The default is ``PEM``. 
subject_alternative_name : list of str or :py:class:`~django_ca.extensions.SubjectAlternativeName`, optional A list of alternative names for the certificate. The value is passed to :py:class:`~django_ca.extensions.SubjectAlternativeName` if not already an instance of that class. key_usage : str or dict or :py:class:`~django_ca.extensions.KeyUsage`, optional Value for the ``keyUsage`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.KeyUsage` if not already an instance of that class. extended_key_usage : str or dict or :py:class:`~django_ca.extensions.ExtendedKeyUsage`, optional Value for the ``extendedKeyUsage`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.ExtendedKeyUsage` if not already an instance of that class. tls_feature : str or dict or :py:class:`~django_ca.extensions.TLSFeature`, optional Value for the ``TLSFeature`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.TLSFeature` if not already an instance of that class. ocsp_no_check : bool, optional Add the OCSPNoCheck flag, indicating that an OCSP client should trust this certificate for it's lifetime. This value only makes sense if you intend to use the certificate for an OCSP responder, the default is ``False``. See `RFC 6990, section 4.2.2.2.1 <https://tools.ietf.org/html/rfc6960#section-4.2.2.2>`_ for more information. extra_extensions : list of :py:class:`cg:cryptography.x509.Extension` or \ :py:class:`django_ca.extensions.Extension`, optional An optional list of additional extensions to add to the certificate. password : bytes, optional Password used to load the private key of the certificate authority. If not passed, the private key is assumed to be unencrypted. Returns ------- cryptography.x509.Certificate The signed certificate. 
### Response: def sign_cert(self, ca, csr, expires=None, algorithm=None, subject=None, cn_in_san=True, csr_format=Encoding.PEM, subject_alternative_name=None, key_usage=None, extended_key_usage=None, tls_feature=None, ocsp_no_check=False, extra_extensions=None, password=None): """Create a signed certificate from a CSR. **PLEASE NOTE:** This function creates the raw certificate and is usually not invoked directly. It is called by :py:func:`Certificate.objects.init() <django_ca.managers.CertificateManager.init>`, which passes along all parameters unchanged and saves the raw certificate to the database. Parameters ---------- ca : :py:class:`~django_ca.models.CertificateAuthority` The certificate authority to sign the certificate with. csr : str A valid CSR. The format is given by the ``csr_format`` parameter. expires : datetime, optional Datetime for when this certificate will expire, defaults to the ``CA_DEFAULT_EXPIRES`` setting. algorithm : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional Hash algorithm used when signing the certificate, passed to :py:func:`~django_ca.utils.parse_hash_algorithm`. The default is the value of the :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>` setting. subject : dict or str or :py:class:`~django_ca.subject.Subject` Subject string, e.g. ``"/CN=example.com"`` or ``Subject("/CN=example.com")``. The value is actually passed to :py:class:`~django_ca.subject.Subject` if it is not already an instance of that class. If this value is not passed or if the value does not contain a CommonName, the first value of the ``subject_alternative_name`` parameter is used as CommonName. cn_in_san : bool, optional Wether the CommonName should also be included as subjectAlternativeName. The default is ``True``, but the parameter is ignored if no CommonName is given. 
This is typically set to ``False`` when creating a client certificate, where the subjects CommonName has no meaningful value as subjectAlternativeName. csr_format : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding`, optional The format of the CSR. The default is ``PEM``. subject_alternative_name : list of str or :py:class:`~django_ca.extensions.SubjectAlternativeName`, optional A list of alternative names for the certificate. The value is passed to :py:class:`~django_ca.extensions.SubjectAlternativeName` if not already an instance of that class. key_usage : str or dict or :py:class:`~django_ca.extensions.KeyUsage`, optional Value for the ``keyUsage`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.KeyUsage` if not already an instance of that class. extended_key_usage : str or dict or :py:class:`~django_ca.extensions.ExtendedKeyUsage`, optional Value for the ``extendedKeyUsage`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.ExtendedKeyUsage` if not already an instance of that class. tls_feature : str or dict or :py:class:`~django_ca.extensions.TLSFeature`, optional Value for the ``TLSFeature`` X509 extension. The value is passed to :py:class:`~django_ca.extensions.TLSFeature` if not already an instance of that class. ocsp_no_check : bool, optional Add the OCSPNoCheck flag, indicating that an OCSP client should trust this certificate for it's lifetime. This value only makes sense if you intend to use the certificate for an OCSP responder, the default is ``False``. See `RFC 6990, section 4.2.2.2.1 <https://tools.ietf.org/html/rfc6960#section-4.2.2.2>`_ for more information. extra_extensions : list of :py:class:`cg:cryptography.x509.Extension` or \ :py:class:`django_ca.extensions.Extension`, optional An optional list of additional extensions to add to the certificate. password : bytes, optional Password used to load the private key of the certificate authority. 
If not passed, the private key is assumed to be unencrypted. Returns ------- cryptography.x509.Certificate The signed certificate. """ ######################## # Normalize parameters # ######################## if subject is None: subject = Subject() # we need a subject instance so we can possibly add the CN elif not isinstance(subject, Subject): subject = Subject(subject) if 'CN' not in subject and not subject_alternative_name: raise ValueError("Must name at least a CN or a subjectAlternativeName.") algorithm = parse_hash_algorithm(algorithm) # Normalize extensions to django_ca.extensions.Extension subclasses if key_usage and not isinstance(key_usage, KeyUsage): key_usage = KeyUsage(key_usage) if extended_key_usage and not isinstance(extended_key_usage, ExtendedKeyUsage): extended_key_usage = ExtendedKeyUsage(extended_key_usage) if tls_feature and not isinstance(tls_feature, TLSFeature): tls_feature = TLSFeature(tls_feature) if not subject_alternative_name: subject_alternative_name = SubjectAlternativeName([]) elif not isinstance(subject_alternative_name, SubjectAlternativeName): subject_alternative_name = SubjectAlternativeName(subject_alternative_name) # use first SAN as CN if CN is not set if 'CN' not in subject: subject['CN'] = subject_alternative_name.value[0].value elif cn_in_san and 'CN' in subject: # add CN to SAN if cn_in_san is True (default) try: cn_name = parse_general_name(subject['CN']) except idna.IDNAError: raise ValueError('%s: Could not parse CommonName as subjectAlternativeName.' 
% subject['CN']) else: if cn_name not in subject_alternative_name: subject_alternative_name.insert(0, cn_name) ################ # Read the CSR # ################ if csr_format == Encoding.PEM: req = x509.load_pem_x509_csr(force_bytes(csr), default_backend()) elif csr_format == Encoding.DER: req = x509.load_der_x509_csr(force_bytes(csr), default_backend()) else: raise ValueError('Unknown CSR format passed: %s' % csr_format) ######################### # Send pre-issue signal # ######################### pre_issue_cert.send(sender=self.model, ca=ca, csr=csr, expires=expires, algorithm=algorithm, subject=subject, cn_in_san=cn_in_san, csr_format=csr_format, subject_alternative_name=subject_alternative_name, key_usage=key_usage, extended_key_usage=extended_key_usage, tls_featur=tls_feature, extra_extensions=extra_extensions, password=password) ####################### # Generate public key # ####################### public_key = req.public_key() builder = get_cert_builder(expires) builder = builder.public_key(public_key) builder = builder.issuer_name(ca.x509.subject) builder = builder.subject_name(subject.name) # Add extensions builder = builder.add_extension(x509.BasicConstraints(ca=False, path_length=None), critical=True) builder = builder.add_extension( x509.SubjectKeyIdentifier.from_public_key(public_key), critical=False) # Get authorityKeyIdentifier from subjectKeyIdentifier from signing CA builder = builder.add_extension(ca.get_authority_key_identifier(), critical=False) for critical, ext in self.get_common_extensions(ca.issuer_url, ca.crl_url, ca.ocsp_url): builder = builder.add_extension(ext, critical=critical) if subject_alternative_name: builder = builder.add_extension(**subject_alternative_name.for_builder()) if key_usage: builder = builder.add_extension(**key_usage.for_builder()) if extended_key_usage: builder = builder.add_extension(**extended_key_usage.for_builder()) if tls_feature: builder = builder.add_extension(**tls_feature.for_builder()) if 
ca.issuer_alt_name: issuer_alt_name = IssuerAlternativeName(ca.issuer_alt_name) builder = builder.add_extension(**issuer_alt_name.for_builder()) if ocsp_no_check: builder = builder.add_extension(**OCSPNoCheck().for_builder()) if extra_extensions: builder = self._extra_extensions(builder, extra_extensions) ################### # Sign public key # ################### cert = builder.sign(private_key=ca.key(password), algorithm=algorithm, backend=default_backend()) return cert, req
def _interpolate_cube(self, lon, lat, egy=None, interp_log=True):
    """Perform interpolation on a healpix cube.  If egy is None
    then interpolation will be performed on the existing energy
    planes.

    Args:
        lon: longitude(s) in degrees; scalar or array, broadcastable
            against ``lat`` and ``egy``.
        lat: latitude(s) in degrees; scalar or array.
        egy: energies at which to interpolate along the energy axis, or
            None to return values on the map's native energy planes.
        interp_log (bool): if True, interpolate along the energy axis in
            log-energy space.

    Returns:
        Interpolated map values; shape follows the broadcast of the
        inputs (transposed per-plane values when ``egy`` is None).
    """

    # Broadcast all inputs to a common shape and expand scalars.
    shape = np.broadcast(lon, lat, egy).shape
    lon = lon * np.ones(shape)
    lat = lat * np.ones(shape)
    # Convert (lat, lon) in degrees to healpix spherical coordinates
    # (colatitude theta, longitude phi) in radians.
    theta = np.pi / 2. - np.radians(lat)
    phi = np.radians(lon)
    vals = []
    # Bilinear spatial interpolation on each energy plane separately.
    for i, _ in enumerate(self.hpx.evals):
        v = hp.pixelfunc.get_interp_val(self.counts[i], theta, phi,
                                        nest=self.hpx.nest)
        vals += [np.expand_dims(np.array(v, ndmin=1), -1)]

    # Stack per-plane results along a trailing energy axis.
    vals = np.concatenate(vals, axis=-1)

    if egy is None:
        return vals.T

    egy = egy * np.ones(shape)

    if interp_log:
        # Fractional pixel coordinates along the (log) energy axis.
        xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy))
    else:
        xvals = utils.val_to_pix(self.hpx.evals, egy)

    # Linear (order=1) interpolation between adjacent energy planes for
    # each spatial sample.
    vals = vals.reshape((-1, vals.shape[-1]))
    xvals = np.ravel(xvals)
    v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals],
                        order=1)
    return v.reshape(shape)
Perform interpolation on a healpix cube. If egy is None then interpolation will be performed on the existing energy planes.
Below is the the instruction that describes the task: ### Input: Perform interpolation on a healpix cube. If egy is None then interpolation will be performed on the existing energy planes. ### Response: def _interpolate_cube(self, lon, lat, egy=None, interp_log=True): """Perform interpolation on a healpix cube. If egy is None then interpolation will be performed on the existing energy planes. """ shape = np.broadcast(lon, lat, egy).shape lon = lon * np.ones(shape) lat = lat * np.ones(shape) theta = np.pi / 2. - np.radians(lat) phi = np.radians(lon) vals = [] for i, _ in enumerate(self.hpx.evals): v = hp.pixelfunc.get_interp_val(self.counts[i], theta, phi, nest=self.hpx.nest) vals += [np.expand_dims(np.array(v, ndmin=1), -1)] vals = np.concatenate(vals, axis=-1) if egy is None: return vals.T egy = egy * np.ones(shape) if interp_log: xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy)) else: xvals = utils.val_to_pix(self.hpx.evals, egy) vals = vals.reshape((-1, vals.shape[-1])) xvals = np.ravel(xvals) v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals], order=1) return v.reshape(shape)
def ReadArtifact(self, name, cursor=None):
    """Fetch the artifact definition stored under *name*.

    Raises:
        db.UnknownArtifactError: if no artifact with that name exists.
    """
    cursor.execute("SELECT definition FROM artifacts WHERE name = %s", [name])
    result_row = cursor.fetchone()
    if result_row is None:
        raise db.UnknownArtifactError(name)
    return _RowToArtifact(result_row)
Looks up an artifact with given name from the database.
Below is the instruction that describes the task:
### Input:
Looks up an artifact with given name from the database.
### Response:
def ReadArtifact(self, name, cursor=None):
    """Looks up an artifact with given name from the database."""
    cursor.execute("SELECT definition FROM artifacts WHERE name = %s", [name])
    row = cursor.fetchone()
    if row is None:
        raise db.UnknownArtifactError(name)
    else:
        return _RowToArtifact(row)
def location(self, relative_alt=False):
    '''return current location'''
    self.wait_gps_fix()
    # Block for one more VFR_HUD so the altitude reading is current.
    self.recv_match(type='VFR_HUD', blocking=True)
    self.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
    msgs = self.messages
    if relative_alt:
        # Relative altitude arrives in millimetres; convert to metres.
        alt = msgs['GLOBAL_POSITION_INT'].relative_alt * 0.001
    else:
        alt = msgs['VFR_HUD'].alt
    # lat/lon arrive as integer degrees * 1e7.
    return location(msgs['GPS_RAW_INT'].lat * 1.0e-7,
                    msgs['GPS_RAW_INT'].lon * 1.0e-7,
                    alt,
                    msgs['VFR_HUD'].heading)
return current location
Below is the the instruction that describes the task: ### Input: return current location ### Response: def location(self, relative_alt=False): '''return current location''' self.wait_gps_fix() # wait for another VFR_HUD, to ensure we have correct altitude self.recv_match(type='VFR_HUD', blocking=True) self.recv_match(type='GLOBAL_POSITION_INT', blocking=True) if relative_alt: alt = self.messages['GLOBAL_POSITION_INT'].relative_alt*0.001 else: alt = self.messages['VFR_HUD'].alt return location(self.messages['GPS_RAW_INT'].lat*1.0e-7, self.messages['GPS_RAW_INT'].lon*1.0e-7, alt, self.messages['VFR_HUD'].heading)
def get_gcloud_pricelist():
    """Retrieve the latest pricelist from Google Cloud, or fall back to a
    cached local copy if the service is not reachable.

    Returns:
        dict: the ``gcp_price_list`` mapping parsed from the pricelist JSON.
    """
    try:
        r = requests.get('http://cloudpricingcalculator.appspot.com'
                         '/static/data/pricelist.json')
        content = json.loads(r.content)
    except requests.exceptions.ConnectionError:
        # BUGFIX: the builtin ConnectionError is not a base class of
        # requests' connection failures, so catching it left this fallback
        # unreachable on network errors (assuming the module does not shadow
        # ConnectionError via a `from requests import ...` -- confirm).
        logger.warning(
            "Couldn't get updated pricelist from "
            "http://cloudpricingcalculator.appspot.com"
            "/static/data/pricelist.json. Falling back to cached "
            "copy, but prices may be out of date.")
        # NOTE(review): path is relative to the current working directory;
        # confirm callers run from the directory containing the cache file.
        with open('gcloudpricelist.json') as infile:
            content = json.load(infile)
    pricelist = content['gcp_price_list']
    return pricelist
Retrieve latest pricelist from Google Cloud, or use cached copy if not reachable.
Below is the the instruction that describes the task: ### Input: Retrieve latest pricelist from Google Cloud, or use cached copy if not reachable. ### Response: def get_gcloud_pricelist(): """Retrieve latest pricelist from Google Cloud, or use cached copy if not reachable. """ try: r = requests.get('http://cloudpricingcalculator.appspot.com' '/static/data/pricelist.json') content = json.loads(r.content) except ConnectionError: logger.warning( "Couldn't get updated pricelist from " "http://cloudpricingcalculator.appspot.com" "/static/data/pricelist.json. Falling back to cached " "copy, but prices may be out of date.") with open('gcloudpricelist.json') as infile: content = json.load(infile) pricelist = content['gcp_price_list'] return pricelist
def gencode(data, output=None, tab="    ", indent=0, overwrite=False):
    """Generate code from class-definition data.

    :param data: a dict describing one class tree, or a list of such dicts.
        Each dict has a "classname", a "metadata" mapping, and an optional
        "subclass" list of nested dicts of the same shape.
    :param output: default None; when given, the python script file name to
        create (otherwise the generated code is printed).
    :param tab: indentation unit, default four spaces.
    :param indent: global indent setting.
    :param overwrite: if True, silently overwrite the output file.
    """
    generator = CodeGenerator(tab=tab, indent=indent)

    if isinstance(data, list):
        # First pass: emit every class definition.
        for entry in data:
            generator.pre_process(entry)
            generator.sort_metadata()
            generator.repr_class_data(entry)
        # Second pass: emit instantiation statements.
        for entry in data:
            generator.lines.append("")
            generator.lines.append("%s" % generator.repr_new_instance(entry))
    elif isinstance(data, dict):
        generator.pre_process(data)
        generator.repr_class_data(data)
        generator.lines.append("")
        generator.lines.append("%s" % generator.repr_new_instance(data))

    if not output:
        print(generator.code)
        return
    if not overwrite and os.path.exists(output):
        raise FileExistsError("%r" % output)
    with open(output, "wb") as handle:
        handle.write(generator.code.encode("utf-8"))
Generate code. :param data: must be list of class data, see a valid data example below :param output: default None, the python script file name you want to create :param tab: default " " :param indent: global indent setting :param overwrite: if True, silently overwrite the output file :: data = [ { "classname": "Database", "metadata": {"db_id": 1, "name": "Database"}, "subclass": [ { "classname": "PersonCollection", "metadata": {"collection_id": 1, "name": "Person", "create_date": datetime.date(2016, 1, 8)}, "subclass": [ { "classname": "Person", "metadata": {"person_id": 1, "name": "Jack"}, }, { "classname": "Person", "metadata": {"person_id": 2, "name": "Paul"}, }, ], }, { "classname": "DepartmentCollection", "metadata": {"collection_id": 2, "name": "Department", "create_date": datetime.date(2016, 1, 1)}, "subclass": [ { "classname": "Department", "metadata": {"department_id": 1, "name": "IT"}, }, { "classname": "Department", "metadata": {"department_id": 2, "name": "HR"}, }, ] }, ], }, ]
Below is the the instruction that describes the task: ### Input: Generate code. :param data: must be list of class data, see a valid data example below :param output: default None, the python script file name you want to create :param tab: default " " :param indent: global indent setting :param overwrite: if True, silently overwrite the output file :: data = [ { "classname": "Database", "metadata": {"db_id": 1, "name": "Database"}, "subclass": [ { "classname": "PersonCollection", "metadata": {"collection_id": 1, "name": "Person", "create_date": datetime.date(2016, 1, 8)}, "subclass": [ { "classname": "Person", "metadata": {"person_id": 1, "name": "Jack"}, }, { "classname": "Person", "metadata": {"person_id": 2, "name": "Paul"}, }, ], }, { "classname": "DepartmentCollection", "metadata": {"collection_id": 2, "name": "Department", "create_date": datetime.date(2016, 1, 1)}, "subclass": [ { "classname": "Department", "metadata": {"department_id": 1, "name": "IT"}, }, { "classname": "Department", "metadata": {"department_id": 2, "name": "HR"}, }, ] }, ], }, ] ### Response: def gencode(data, output=None, tab=" ", indent=0, overwrite=False): """Generate code. 
:param data: must be list of class data, see a valid data example below :param output: default None, the python script file name you want to create :param tab: default " " :param indent: global indent setting :param overwrite: if True, silently overwrite the output file :: data = [ { "classname": "Database", "metadata": {"db_id": 1, "name": "Database"}, "subclass": [ { "classname": "PersonCollection", "metadata": {"collection_id": 1, "name": "Person", "create_date": datetime.date(2016, 1, 8)}, "subclass": [ { "classname": "Person", "metadata": {"person_id": 1, "name": "Jack"}, }, { "classname": "Person", "metadata": {"person_id": 2, "name": "Paul"}, }, ], }, { "classname": "DepartmentCollection", "metadata": {"collection_id": 2, "name": "Department", "create_date": datetime.date(2016, 1, 1)}, "subclass": [ { "classname": "Department", "metadata": {"department_id": 1, "name": "IT"}, }, { "classname": "Department", "metadata": {"department_id": 2, "name": "HR"}, }, ] }, ], }, ] """ codegen = CodeGenerator(tab=tab, indent=indent) if isinstance(data, list): for class_data in data: codegen.pre_process(class_data) codegen.sort_metadata() codegen.repr_class_data(class_data) for class_data in data: codegen.lines.append("") codegen.lines.append("%s" % codegen.repr_new_instance(class_data)) elif isinstance(data, dict): codegen.pre_process(data) codegen.repr_class_data(data) codegen.lines.append("") codegen.lines.append("%s" % codegen.repr_new_instance(data)) if output: if not overwrite: if os.path.exists(output): raise FileExistsError("%r" % output) with open(output, "wb") as f: f.write(codegen.code.encode("utf-8")) else: print(codegen.code)
def _update_defaults(self, defaults): """Updates the given defaults with values from the config files and the environ. Does a little special handling for certain types of options (lists).""" # Accumulate complex default state. self.values = optparse.Values(self.defaults) late_eval = set() # Then set the options with those values for key, val in self._get_ordered_configuration_items(): # '--' because configuration supports only long names option = self.get_option('--' + key) # Ignore options not present in this parser. E.g. non-globals put # in [global] by users that want them to apply to all applicable # commands. if option is None: continue if option.action in ('store_true', 'store_false', 'count'): try: val = strtobool(val) except ValueError: error_msg = invalid_config_error_message( option.action, key, val ) self.error(error_msg) elif option.action == 'append': val = val.split() val = [self.check_default(option, key, v) for v in val] elif option.action == 'callback': late_eval.add(option.dest) opt_str = option.get_opt_string() val = option.convert_value(opt_str, val) # From take_action args = option.callback_args or () kwargs = option.callback_kwargs or {} option.callback(option, opt_str, val, self, *args, **kwargs) else: val = self.check_default(option, key, val) defaults[option.dest] = val for key in late_eval: defaults[key] = getattr(self.values, key) self.values = None return defaults
Updates the given defaults with values from the config files and the environ. Does a little special handling for certain types of options (lists).
Below is the the instruction that describes the task: ### Input: Updates the given defaults with values from the config files and the environ. Does a little special handling for certain types of options (lists). ### Response: def _update_defaults(self, defaults): """Updates the given defaults with values from the config files and the environ. Does a little special handling for certain types of options (lists).""" # Accumulate complex default state. self.values = optparse.Values(self.defaults) late_eval = set() # Then set the options with those values for key, val in self._get_ordered_configuration_items(): # '--' because configuration supports only long names option = self.get_option('--' + key) # Ignore options not present in this parser. E.g. non-globals put # in [global] by users that want them to apply to all applicable # commands. if option is None: continue if option.action in ('store_true', 'store_false', 'count'): try: val = strtobool(val) except ValueError: error_msg = invalid_config_error_message( option.action, key, val ) self.error(error_msg) elif option.action == 'append': val = val.split() val = [self.check_default(option, key, v) for v in val] elif option.action == 'callback': late_eval.add(option.dest) opt_str = option.get_opt_string() val = option.convert_value(opt_str, val) # From take_action args = option.callback_args or () kwargs = option.callback_kwargs or {} option.callback(option, opt_str, val, self, *args, **kwargs) else: val = self.check_default(option, key, val) defaults[option.dest] = val for key in late_eval: defaults[key] = getattr(self.values, key) self.values = None return defaults
def ReadFlowObject(self, client_id, flow_id): """Reads a flow object from the database.""" try: return self.flows[(client_id, flow_id)].Copy() except KeyError: raise db.UnknownFlowError(client_id, flow_id)
Reads a flow object from the database.
Below is the instruction that describes the task:
### Input:
Reads a flow object from the database.
### Response:
def ReadFlowObject(self, client_id, flow_id):
    """Reads a flow object from the database."""
    try:
        return self.flows[(client_id, flow_id)].Copy()
    except KeyError:
        raise db.UnknownFlowError(client_id, flow_id)
def read(cls, dstore):
    """
    Rebuild a composite risk model from its serialized datastore form.

    :param dstore: a DataStore instance
    :returns: a :class:`CompositeRiskModel` instance
    """
    oqparam = dstore['oqparam']
    tmap = (dstore['taxonomy_mapping']
            if 'taxonomy_mapping' in dstore else {})
    crm = dstore.getitem('risk_model')

    # building dictionaries riskid -> loss_type -> risk_func
    fragdict, vulndict, consdict, retrodict = (
        AccumDict(), AccumDict(), AccumDict(), AccumDict())
    fragdict.limit_states = crm.attrs['limit_states']
    for quoted_id, rm in crm.items():
        # group names are URL-quoted risk identifiers
        riskid = unquote_plus(quoted_id)
        fragdict[riskid] = {}
        vulndict[riskid] = {}
        consdict[riskid] = {}
        retrodict[riskid] = {}
        for lt_kind in rm:
            # keys have the form "<loss_type>-<kind>"
            lt, kind = lt_kind.rsplit('-', 1)
            rf = dstore['risk_model/%s/%s' % (quoted_id, lt_kind)]
            if kind == 'consequence':
                consdict[riskid][lt, kind] = rf
            elif kind == 'fragility':  # rf is a FragilityFunctionList
                try:
                    rf = rf.build(
                        fragdict.limit_states,
                        oqparam.continuous_fragility_discretization,
                        oqparam.steps_per_interval)
                except ValueError as err:
                    # prefix the offending risk id for a clearer message
                    raise ValueError('%s: %s' % (riskid, err))
                fragdict[riskid][lt, kind] = rf
            else:  # rf is a vulnerability function
                rf.init()
                if lt.endswith('_retrofitted'):
                    # strip _retrofitted, since len('_retrofitted') = 12
                    retrodict[riskid][lt[:-12], kind] = rf
                else:
                    vulndict[riskid][lt, kind] = rf
    return CompositeRiskModel(
        oqparam, tmap, fragdict, vulndict, consdict, retrodict)
:param dstore: a DataStore instance :returns: a :class:`CompositeRiskModel` instance
Below is the the instruction that describes the task: ### Input: :param dstore: a DataStore instance :returns: a :class:`CompositeRiskModel` instance ### Response: def read(cls, dstore): """ :param dstore: a DataStore instance :returns: a :class:`CompositeRiskModel` instance """ oqparam = dstore['oqparam'] tmap = (dstore['taxonomy_mapping'] if 'taxonomy_mapping' in dstore else {}) crm = dstore.getitem('risk_model') # building dictionaries riskid -> loss_type -> risk_func fragdict, vulndict, consdict, retrodict = ( AccumDict(), AccumDict(), AccumDict(), AccumDict()) fragdict.limit_states = crm.attrs['limit_states'] for quoted_id, rm in crm.items(): riskid = unquote_plus(quoted_id) fragdict[riskid] = {} vulndict[riskid] = {} consdict[riskid] = {} retrodict[riskid] = {} for lt_kind in rm: lt, kind = lt_kind.rsplit('-', 1) rf = dstore['risk_model/%s/%s' % (quoted_id, lt_kind)] if kind == 'consequence': consdict[riskid][lt, kind] = rf elif kind == 'fragility': # rf is a FragilityFunctionList try: rf = rf.build( fragdict.limit_states, oqparam.continuous_fragility_discretization, oqparam.steps_per_interval) except ValueError as err: raise ValueError('%s: %s' % (riskid, err)) fragdict[riskid][lt, kind] = rf else: # rf is a vulnerability function rf.init() if lt.endswith('_retrofitted'): # strip _retrofitted, since len('_retrofitted') = 12 retrodict[riskid][lt[:-12], kind] = rf else: vulndict[riskid][lt, kind] = rf return CompositeRiskModel( oqparam, tmap, fragdict, vulndict, consdict, retrodict)
def use_isolated_vault_view(self):
    """Pass through to provider AuthorizationLookupSession.use_isolated_vault_view"""
    self._vault_view = ISOLATED
    # self._get_provider_session('authorization_lookup_session')  # To make sure the session is tracked
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_isolated_vault_view()
        except AttributeError:
            # This provider session does not support vault views; skip it.
            pass
Pass through to provider AuthorizationLookupSession.use_isolated_vault_view
Below is the the instruction that describes the task: ### Input: Pass through to provider AuthorizationLookupSession.use_isolated_vault_view ### Response: def use_isolated_vault_view(self): """Pass through to provider AuthorizationLookupSession.use_isolated_vault_view""" self._vault_view = ISOLATED # self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_isolated_vault_view() except AttributeError: pass
def upload_ssh_key(host, username, password, ssh_key=None, ssh_key_file=None,
                   protocol=None, port=None, certificate_verify=False):
    '''
    Upload an ssh key for root to an ESXi host via http PUT. This function
    only works for ESXi, not vCenter. Only one ssh key can be uploaded for
    root. Uploading a second key will replace any existing key.

    :param host: The location of the ESXi Host
    :param username: Username to connect as
    :param password: Password for the ESXi web endpoint
    :param ssh_key: Public SSH key, will be added to authorized_keys on ESXi
    :param ssh_key_file: File containing the SSH key. Use 'ssh_key' or
                         ssh_key_file, but not both.
    :param protocol: defaults to https, can be http if ssl is disabled on ESXi
    :param port: defaults to 443 for https
    :param certificate_verify: If true require that the SSL connection present
                               a valid certificate
    :return: Dictionary with a 'status' key, True if upload is successful.
             If upload is unsuccessful, 'status' key will be False and
             an 'Error' key will have an informative message.

    CLI Example:

    .. code-block:: bash

        salt '*' vsphere.upload_ssh_key my.esxi.host root bad-password ssh_key_file='/etc/salt/my_keys/my_key.pub'
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # BUGFIX: previously a call with neither key argument left ``result``
    # as None and surfaced as a confusing AttributeError message.
    if not ssh_key and not ssh_key_file:
        return {'status': False,
                'Error': 'One of ssh_key or ssh_key_file must be provided.'}

    url = '{0}://{1}:{2}/host/ssh_root_authorized_keys'.format(protocol, host, port)
    ret = {}
    try:
        if ssh_key:
            result = salt.utils.http.query(url, status=True, text=True,
                                           method='PUT',
                                           username=username,
                                           password=password,
                                           data=ssh_key,
                                           verify_ssl=certificate_verify)
        else:
            result = salt.utils.http.query(url, status=True, text=True,
                                           method='PUT',
                                           username=username,
                                           password=password,
                                           data_file=ssh_key_file,
                                           data_render=False,
                                           verify_ssl=certificate_verify)
        if result.get('status') == 200:
            ret['status'] = True
        else:
            ret['status'] = False
            # 'error' may be absent on some failures; report the HTTP
            # status instead of raising KeyError.
            ret['Error'] = result.get(
                'error',
                'Unexpected HTTP status: {0}'.format(result.get('status')))
    except Exception as msg:  # pylint: disable=broad-except
        # Best-effort contract: report failures in the return dict.
        ret['status'] = False
        ret['Error'] = msg
    return ret
Upload an ssh key for root to an ESXi host via http PUT. This function only works for ESXi, not vCenter. Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. :param host: The location of the ESXi Host :param username: Username to connect as :param password: Password for the ESXi web endpoint :param ssh_key: Public SSH key, will be added to authorized_keys on ESXi :param ssh_key_file: File containing the SSH key. Use 'ssh_key' or ssh_key_file, but not both. :param protocol: defaults to https, can be http if ssl is disabled on ESXi :param port: defaults to 443 for https :param certificate_verify: If true require that the SSL connection present a valid certificate :return: Dictionary with a 'status' key, True if upload is successful. If upload is unsuccessful, 'status' key will be False and an 'Error' key will have an informative message. CLI Example: .. code-block:: bash salt '*' vsphere.upload_ssh_key my.esxi.host root bad-password ssh_key_file='/etc/salt/my_keys/my_key.pub'
Below is the the instruction that describes the task: ### Input: Upload an ssh key for root to an ESXi host via http PUT. This function only works for ESXi, not vCenter. Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. :param host: The location of the ESXi Host :param username: Username to connect as :param password: Password for the ESXi web endpoint :param ssh_key: Public SSH key, will be added to authorized_keys on ESXi :param ssh_key_file: File containing the SSH key. Use 'ssh_key' or ssh_key_file, but not both. :param protocol: defaults to https, can be http if ssl is disabled on ESXi :param port: defaults to 443 for https :param certificate_verify: If true require that the SSL connection present a valid certificate :return: Dictionary with a 'status' key, True if upload is successful. If upload is unsuccessful, 'status' key will be False and an 'Error' key will have an informative message. CLI Example: .. code-block:: bash salt '*' vsphere.upload_ssh_key my.esxi.host root bad-password ssh_key_file='/etc/salt/my_keys/my_key.pub' ### Response: def upload_ssh_key(host, username, password, ssh_key=None, ssh_key_file=None, protocol=None, port=None, certificate_verify=False): ''' Upload an ssh key for root to an ESXi host via http PUT. This function only works for ESXi, not vCenter. Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. :param host: The location of the ESXi Host :param username: Username to connect as :param password: Password for the ESXi web endpoint :param ssh_key: Public SSH key, will be added to authorized_keys on ESXi :param ssh_key_file: File containing the SSH key. Use 'ssh_key' or ssh_key_file, but not both. 
:param protocol: defaults to https, can be http if ssl is disabled on ESXi :param port: defaults to 443 for https :param certificate_verify: If true require that the SSL connection present a valid certificate :return: Dictionary with a 'status' key, True if upload is successful. If upload is unsuccessful, 'status' key will be False and an 'Error' key will have an informative message. CLI Example: .. code-block:: bash salt '*' vsphere.upload_ssh_key my.esxi.host root bad-password ssh_key_file='/etc/salt/my_keys/my_key.pub' ''' if protocol is None: protocol = 'https' if port is None: port = 443 url = '{0}://{1}:{2}/host/ssh_root_authorized_keys'.format(protocol, host, port) ret = {} result = None try: if ssh_key: result = salt.utils.http.query(url, status=True, text=True, method='PUT', username=username, password=password, data=ssh_key, verify_ssl=certificate_verify) elif ssh_key_file: result = salt.utils.http.query(url, status=True, text=True, method='PUT', username=username, password=password, data_file=ssh_key_file, data_render=False, verify_ssl=certificate_verify) if result.get('status') == 200: ret['status'] = True else: ret['status'] = False ret['Error'] = result['error'] except Exception as msg: ret['status'] = False ret['Error'] = msg return ret
def keltner(self, n, dev, array=False):
    """Keltner Channel: SMA midline +/- dev * ATR."""
    middle = self.sma(n, array)
    band = self.atr(n, array) * dev
    return middle + band, middle - band
肯特纳通道
Below is the the instruction that describes the task: ### Input: 肯特纳通道 ### Response: def keltner(self, n, dev, array=False): """肯特纳通道""" mid = self.sma(n, array) atr = self.atr(n, array) up = mid + atr * dev down = mid - atr * dev return up, down
def remove_links(text):
    """
    Helper function to remove the links from the input text

    Args:
        text (str): A string

    Returns:
        str: the same text, but with any substring that matches the regex
             for a link removed and replaced with a space

    Example:
        >>> from tweet_parser.getter_methods.tweet_text import remove_links
        >>> text = "lorem ipsum dolor https://twitter.com/RobotPrincessFi"
        >>> remove_links(text)
        'lorem ipsum dolor  '
    """
    # BUGFIX: the original pattern used [A-z] (which also matches the
    # punctuation between 'Z' and 'a'), left the dot in "t.co" unescaped,
    # and ended with a greedy '.*' that swallowed all text following the
    # link.  '\S*' stops the match at the first whitespace instead.
    # Raw strings avoid invalid-escape-sequence warnings for '\w'/'\S'.
    tco_link_regex = re.compile(r"https?://t\.co/[A-Za-z0-9]\S*")
    generic_link_regex = re.compile(r"(https?://)?(\w*[.]\w+)+([/?=&]+\w+)*")
    remove_tco = tco_link_regex.sub(" ", text)
    remove_generic = generic_link_regex.sub(" ", remove_tco)
    return remove_generic
Helper function to remove the links from the input text Args: text (str): A string Returns: str: the same text, but with any substring that matches the regex for a link removed and replaced with a space Example: >>> from tweet_parser.getter_methods.tweet_text import remove_links >>> text = "lorem ipsum dolor https://twitter.com/RobotPrincessFi" >>> remove_links(text) 'lorem ipsum dolor '
Below is the the instruction that describes the task: ### Input: Helper function to remove the links from the input text Args: text (str): A string Returns: str: the same text, but with any substring that matches the regex for a link removed and replaced with a space Example: >>> from tweet_parser.getter_methods.tweet_text import remove_links >>> text = "lorem ipsum dolor https://twitter.com/RobotPrincessFi" >>> remove_links(text) 'lorem ipsum dolor ' ### Response: def remove_links(text): """ Helper function to remove the links from the input text Args: text (str): A string Returns: str: the same text, but with any substring that matches the regex for a link removed and replaced with a space Example: >>> from tweet_parser.getter_methods.tweet_text import remove_links >>> text = "lorem ipsum dolor https://twitter.com/RobotPrincessFi" >>> remove_links(text) 'lorem ipsum dolor ' """ tco_link_regex = re.compile("https?://t.co/[A-z0-9].*") generic_link_regex = re.compile("(https?://)?(\w*[.]\w+)+([/?=&]+\w+)*") remove_tco = re.sub(tco_link_regex, " ", text) remove_generic = re.sub(generic_link_regex, " ", remove_tco) return remove_generic
def sim(self, key, size=None):
    '''
    Make a register or a memory region symbolic.

    key: memory address(int) or register name(str)
    size: size of object in bytes
    Returns the (possibly concretized) key.
    '''
    project = load_project()
    if key in project.arch.registers:
        # Register: default width comes from the arch's register table.
        if size is None:
            size = project.arch.registers[key][1]
        size *= 8  # bytes -> bits for the bit-vector width
        s = claripy.BVS("angrdbg_reg_" + str(key), size)
        setattr(self.state.regs, key, s)
        self.symbolics[key] = (s, size)
    elif isinstance(key, int) or isinstance(key, long):
        # NOTE(review): `long` exists only on Python 2; on Python 3 this
        # expression raises NameError for non-int keys unless `long` is
        # aliased elsewhere in the module -- confirm.
        if size is None:
            # arch.bits is already expressed in bits, so no *8 here.
            size = project.arch.bits
        else:
            size *= 8  # bytes -> bits
        s = claripy.BVS("angrdbg_mem_" + hex(key), size)
        self.state.memory.store(key, s)
        self.symbolics[key] = (s, size)
    elif isinstance(key, claripy.ast.bv.BV):
        # Symbolic address: concretize it first, then recurse.
        key = self.state.solver.eval(key, cast_to=int)
        self.sim(key, size)
    else:
        raise ValueError(
            "key must be a register name or a memory address, not %s" % str(
                type(key)))
    return key
key: memory address(int) or register name(str) size: size of object in bytes
Below is the the instruction that describes the task: ### Input: key: memory address(int) or register name(str) size: size of object in bytes ### Response: def sim(self, key, size=None): ''' key: memory address(int) or register name(str) size: size of object in bytes ''' project = load_project() if key in project.arch.registers: if size is None: size = project.arch.registers[key][1] size *= 8 s = claripy.BVS("angrdbg_reg_" + str(key), size) setattr(self.state.regs, key, s) self.symbolics[key] = (s, size) elif isinstance(key, int) or isinstance(key, long): if size is None: size = project.arch.bits else: size *= 8 s = claripy.BVS("angrdbg_mem_" + hex(key), size) self.state.memory.store(key, s) self.symbolics[key] = (s, size) elif isinstance(key, claripy.ast.bv.BV): key = self.state.solver.eval(key, cast_to=int) self.sim(key, size) else: raise ValueError( "key must be a register name or a memory address, not %s" % str( type(key))) return key
def is_dataset_on_gcs(dataset_name):
    """If the dataset is available on the GCS bucket gs://tfds-data/datasets."""
    prefix = posixpath.join(GCS_DATASETS_DIR, dataset_name)
    # A hosted dataset directory lists more than two matching entries.
    matching_files = gcs_files(prefix_filter=prefix)
    return len(matching_files) > 2
If the dataset is available on the GCS bucket gs://tfds-data/datasets.
Below is the instruction that describes the task:
### Input:
If the dataset is available on the GCS bucket gs://tfds-data/datasets.
### Response:
def is_dataset_on_gcs(dataset_name):
    """If the dataset is available on the GCS bucket gs://tfds-data/datasets."""
    dir_name = posixpath.join(GCS_DATASETS_DIR, dataset_name)
    return len(gcs_files(prefix_filter=dir_name)) > 2
def export(self, top=True): """Exports object to its string representation. Args: top (bool): if True appends `internal_name` before values. All non list objects should be exported with value top=True, all list objects, that are embedded in as fields inlist objects should be exported with `top`=False Returns: str: The objects string representation """ out = [] if top: out.append(self._internal_name) out.append(self._to_str(self.number_of_records_per_hour)) out.append(self._to_str(self.data_period_name_or_description)) out.append(self._to_str(self.data_period_start_day_of_week)) out.append(self._to_str(self.data_period_start_day)) out.append(self._to_str(self.data_period_end_day)) return ",".join(out)
Exports object to its string representation. Args: top (bool): if True appends `internal_name` before values. All non list objects should be exported with value top=True, all list objects, that are embedded in as fields inlist objects should be exported with `top`=False Returns: str: The objects string representation
Below is the the instruction that describes the task: ### Input: Exports object to its string representation. Args: top (bool): if True appends `internal_name` before values. All non list objects should be exported with value top=True, all list objects, that are embedded in as fields inlist objects should be exported with `top`=False Returns: str: The objects string representation ### Response: def export(self, top=True): """Exports object to its string representation. Args: top (bool): if True appends `internal_name` before values. All non list objects should be exported with value top=True, all list objects, that are embedded in as fields inlist objects should be exported with `top`=False Returns: str: The objects string representation """ out = [] if top: out.append(self._internal_name) out.append(self._to_str(self.number_of_records_per_hour)) out.append(self._to_str(self.data_period_name_or_description)) out.append(self._to_str(self.data_period_start_day_of_week)) out.append(self._to_str(self.data_period_start_day)) out.append(self._to_str(self.data_period_end_day)) return ",".join(out)
def atexit_register(func): """ Uses either uwsgi's atexit mechanism, or atexit from the stdlib. When running under uwsgi, using their atexit handler is more reliable, especially when using gevent :param func: the function to call at exit """ try: import uwsgi orig = getattr(uwsgi, "atexit", None) def uwsgi_atexit(): if callable(orig): orig() func() uwsgi.atexit = uwsgi_atexit except ImportError: atexit.register(func)
Uses either uwsgi's atexit mechanism, or atexit from the stdlib. When running under uwsgi, using their atexit handler is more reliable, especially when using gevent :param func: the function to call at exit
Below is the the instruction that describes the task: ### Input: Uses either uwsgi's atexit mechanism, or atexit from the stdlib. When running under uwsgi, using their atexit handler is more reliable, especially when using gevent :param func: the function to call at exit ### Response: def atexit_register(func): """ Uses either uwsgi's atexit mechanism, or atexit from the stdlib. When running under uwsgi, using their atexit handler is more reliable, especially when using gevent :param func: the function to call at exit """ try: import uwsgi orig = getattr(uwsgi, "atexit", None) def uwsgi_atexit(): if callable(orig): orig() func() uwsgi.atexit = uwsgi_atexit except ImportError: atexit.register(func)
def save(self, clean=True): """Serialize into raw representation. Clears the dirty bit by default. Args: clean (bool): Whether to clear the dirty bit. Returns: dict: Raw. """ ret = {} if clean: self._dirty = False else: ret['_dirty'] = self._dirty return ret
Serialize into raw representation. Clears the dirty bit by default. Args: clean (bool): Whether to clear the dirty bit. Returns: dict: Raw.
Below is the the instruction that describes the task: ### Input: Serialize into raw representation. Clears the dirty bit by default. Args: clean (bool): Whether to clear the dirty bit. Returns: dict: Raw. ### Response: def save(self, clean=True): """Serialize into raw representation. Clears the dirty bit by default. Args: clean (bool): Whether to clear the dirty bit. Returns: dict: Raw. """ ret = {} if clean: self._dirty = False else: ret['_dirty'] = self._dirty return ret
def _build_query(self, uri, params=None, action_token_type=None): """Prepare query string""" if params is None: params = QueryParams() params['response_format'] = 'json' session_token = None if action_token_type in self._action_tokens: # Favor action token using_action_token = True session_token = self._action_tokens[action_token_type] else: using_action_token = False if self._session: session_token = self._session['session_token'] if session_token: params['session_token'] = session_token # make order of parameters predictable for testing keys = list(params.keys()) keys.sort() query = urlencode([tuple([key, params[key]]) for key in keys]) if not using_action_token and self._session: secret_key_mod = int(self._session['secret_key']) % 256 signature_base = (str(secret_key_mod) + self._session['time'] + uri + '?' + query).encode('ascii') query += '&signature=' + hashlib.md5(signature_base).hexdigest() return query
Prepare query string
Below is the the instruction that describes the task: ### Input: Prepare query string ### Response: def _build_query(self, uri, params=None, action_token_type=None): """Prepare query string""" if params is None: params = QueryParams() params['response_format'] = 'json' session_token = None if action_token_type in self._action_tokens: # Favor action token using_action_token = True session_token = self._action_tokens[action_token_type] else: using_action_token = False if self._session: session_token = self._session['session_token'] if session_token: params['session_token'] = session_token # make order of parameters predictable for testing keys = list(params.keys()) keys.sort() query = urlencode([tuple([key, params[key]]) for key in keys]) if not using_action_token and self._session: secret_key_mod = int(self._session['secret_key']) % 256 signature_base = (str(secret_key_mod) + self._session['time'] + uri + '?' + query).encode('ascii') query += '&signature=' + hashlib.md5(signature_base).hexdigest() return query
def device_firmware_str(self, indent): """Convenience to string method. """ host_build_ns = self.host_firmware_build_timestamp host_build_s = datetime.datetime.utcfromtimestamp(host_build_ns/1000000000) if host_build_ns != None else None wifi_build_ns = self.wifi_firmware_build_timestamp wifi_build_s = datetime.datetime.utcfromtimestamp(wifi_build_ns/1000000000) if wifi_build_ns != None else None s = "Host Firmware Build Timestamp: {} ({} UTC)\n".format(host_build_ns, host_build_s) s += indent + "Host Firmware Build Version: {}\n".format(self.host_firmware_version) s += indent + "Wifi Firmware Build Timestamp: {} ({} UTC)\n".format(wifi_build_ns, wifi_build_s) s += indent + "Wifi Firmware Build Version: {}\n".format(self.wifi_firmware_version) return s
Convenience to string method.
Below is the the instruction that describes the task: ### Input: Convenience to string method. ### Response: def device_firmware_str(self, indent): """Convenience to string method. """ host_build_ns = self.host_firmware_build_timestamp host_build_s = datetime.datetime.utcfromtimestamp(host_build_ns/1000000000) if host_build_ns != None else None wifi_build_ns = self.wifi_firmware_build_timestamp wifi_build_s = datetime.datetime.utcfromtimestamp(wifi_build_ns/1000000000) if wifi_build_ns != None else None s = "Host Firmware Build Timestamp: {} ({} UTC)\n".format(host_build_ns, host_build_s) s += indent + "Host Firmware Build Version: {}\n".format(self.host_firmware_version) s += indent + "Wifi Firmware Build Timestamp: {} ({} UTC)\n".format(wifi_build_ns, wifi_build_s) s += indent + "Wifi Firmware Build Version: {}\n".format(self.wifi_firmware_version) return s
def register(cls, archive_table, engine): """ :param archive_table: the model for the users archive table :param engine: the database engine :param version_col_names: strings which correspond to columns that versioning will pivot \ around. These columns must have a unique constraint set on them. """ version_col_names = cls.version_columns if not version_col_names: raise LogTableCreationError('Need to specify version cols in cls.version_columns') if cls.ignore_columns is None: cls.ignore_columns = set() cls.ignore_columns.add('version_id') version_cols = [getattr(cls, col_name, None) for col_name in version_col_names] cls._validate(engine, *version_cols) archive_table._validate(engine, *version_cols) cls.ArchiveTable = archive_table
:param archive_table: the model for the users archive table :param engine: the database engine :param version_col_names: strings which correspond to columns that versioning will pivot \ around. These columns must have a unique constraint set on them.
Below is the the instruction that describes the task: ### Input: :param archive_table: the model for the users archive table :param engine: the database engine :param version_col_names: strings which correspond to columns that versioning will pivot \ around. These columns must have a unique constraint set on them. ### Response: def register(cls, archive_table, engine): """ :param archive_table: the model for the users archive table :param engine: the database engine :param version_col_names: strings which correspond to columns that versioning will pivot \ around. These columns must have a unique constraint set on them. """ version_col_names = cls.version_columns if not version_col_names: raise LogTableCreationError('Need to specify version cols in cls.version_columns') if cls.ignore_columns is None: cls.ignore_columns = set() cls.ignore_columns.add('version_id') version_cols = [getattr(cls, col_name, None) for col_name in version_col_names] cls._validate(engine, *version_cols) archive_table._validate(engine, *version_cols) cls.ArchiveTable = archive_table
def DAS(cpu): """ Decimal adjusts AL after subtraction. Adjusts the result of the subtraction of two packed BCD values to create a packed BCD result. The AL register is the implied source and destination operand. If a decimal borrow is detected, the CF and AF flags are set accordingly. This instruction is not valid in 64-bit mode. The SF, ZF, and PF flags are set according to the result.:: IF (AL AND 0FH) > 9 OR AF = 1 THEN AL = AL - 6; CF = CF OR BorrowFromLastSubtraction; (* CF OR borrow from AL = AL - 6 *) AF = 1; ELSE AF = 0; FI; IF ((AL > 99H) or OLD_CF = 1) THEN AL = AL - 60H; CF = 1; :param cpu: current CPU. """ oldAL = cpu.AL oldCF = cpu.CF cpu.AF = Operators.OR((cpu.AL & 0x0f) > 9, cpu.AF) cpu.AL = Operators.ITEBV(8, cpu.AF, cpu.AL - 6, cpu.AL) cpu.CF = Operators.ITE(cpu.AF, Operators.OR(oldCF, cpu.AL > oldAL), cpu.CF) cpu.CF = Operators.ITE(Operators.OR(oldAL > 0x99, oldCF), True, cpu.CF) cpu.AL = Operators.ITEBV(8, Operators.OR(oldAL > 0x99, oldCF), cpu.AL - 0x60, cpu.AL) # """ if (cpu.AL & 0x0f) > 9 or cpu.AF: cpu.AL = cpu.AL - 6; cpu.CF = Operators.OR(oldCF, cpu.AL > oldAL) cpu.AF = True else: cpu.AF = False if ((oldAL > 0x99) or oldCF): cpu.AL = cpu.AL - 0x60 cpu.CF = True """ cpu.ZF = cpu.AL == 0 cpu.SF = (cpu.AL & 0x80) != 0 cpu.PF = cpu._calculate_parity_flag(cpu.AL)
Decimal adjusts AL after subtraction. Adjusts the result of the subtraction of two packed BCD values to create a packed BCD result. The AL register is the implied source and destination operand. If a decimal borrow is detected, the CF and AF flags are set accordingly. This instruction is not valid in 64-bit mode. The SF, ZF, and PF flags are set according to the result.:: IF (AL AND 0FH) > 9 OR AF = 1 THEN AL = AL - 6; CF = CF OR BorrowFromLastSubtraction; (* CF OR borrow from AL = AL - 6 *) AF = 1; ELSE AF = 0; FI; IF ((AL > 99H) or OLD_CF = 1) THEN AL = AL - 60H; CF = 1; :param cpu: current CPU.
Below is the the instruction that describes the task: ### Input: Decimal adjusts AL after subtraction. Adjusts the result of the subtraction of two packed BCD values to create a packed BCD result. The AL register is the implied source and destination operand. If a decimal borrow is detected, the CF and AF flags are set accordingly. This instruction is not valid in 64-bit mode. The SF, ZF, and PF flags are set according to the result.:: IF (AL AND 0FH) > 9 OR AF = 1 THEN AL = AL - 6; CF = CF OR BorrowFromLastSubtraction; (* CF OR borrow from AL = AL - 6 *) AF = 1; ELSE AF = 0; FI; IF ((AL > 99H) or OLD_CF = 1) THEN AL = AL - 60H; CF = 1; :param cpu: current CPU. ### Response: def DAS(cpu): """ Decimal adjusts AL after subtraction. Adjusts the result of the subtraction of two packed BCD values to create a packed BCD result. The AL register is the implied source and destination operand. If a decimal borrow is detected, the CF and AF flags are set accordingly. This instruction is not valid in 64-bit mode. The SF, ZF, and PF flags are set according to the result.:: IF (AL AND 0FH) > 9 OR AF = 1 THEN AL = AL - 6; CF = CF OR BorrowFromLastSubtraction; (* CF OR borrow from AL = AL - 6 *) AF = 1; ELSE AF = 0; FI; IF ((AL > 99H) or OLD_CF = 1) THEN AL = AL - 60H; CF = 1; :param cpu: current CPU. """ oldAL = cpu.AL oldCF = cpu.CF cpu.AF = Operators.OR((cpu.AL & 0x0f) > 9, cpu.AF) cpu.AL = Operators.ITEBV(8, cpu.AF, cpu.AL - 6, cpu.AL) cpu.CF = Operators.ITE(cpu.AF, Operators.OR(oldCF, cpu.AL > oldAL), cpu.CF) cpu.CF = Operators.ITE(Operators.OR(oldAL > 0x99, oldCF), True, cpu.CF) cpu.AL = Operators.ITEBV(8, Operators.OR(oldAL > 0x99, oldCF), cpu.AL - 0x60, cpu.AL) # """ if (cpu.AL & 0x0f) > 9 or cpu.AF: cpu.AL = cpu.AL - 6; cpu.CF = Operators.OR(oldCF, cpu.AL > oldAL) cpu.AF = True else: cpu.AF = False if ((oldAL > 0x99) or oldCF): cpu.AL = cpu.AL - 0x60 cpu.CF = True """ cpu.ZF = cpu.AL == 0 cpu.SF = (cpu.AL & 0x80) != 0 cpu.PF = cpu._calculate_parity_flag(cpu.AL)
def parse_meta(filename, data): """ Parse `data` to EPublication. Args: filename (str): Used to choose right parser based at suffix. data (str): Content of the metadata file. Returns: EPublication: object. """ if "." not in filename: raise MetaParsingException( "Can't recognize type of your metadata ('%s')!" % filename ) suffix = filename.rsplit(".", 1)[1].lower() if suffix not in SUPPORTED_FILES: raise MetaParsingException("Can't parse file of type '%s'!" % suffix) fp = validator.FieldParser() for key, val in SUPPORTED_FILES[suffix](data).items(): fp.process(key, val) return fp.get_epublication()
Parse `data` to EPublication. Args: filename (str): Used to choose right parser based at suffix. data (str): Content of the metadata file. Returns: EPublication: object.
Below is the the instruction that describes the task: ### Input: Parse `data` to EPublication. Args: filename (str): Used to choose right parser based at suffix. data (str): Content of the metadata file. Returns: EPublication: object. ### Response: def parse_meta(filename, data): """ Parse `data` to EPublication. Args: filename (str): Used to choose right parser based at suffix. data (str): Content of the metadata file. Returns: EPublication: object. """ if "." not in filename: raise MetaParsingException( "Can't recognize type of your metadata ('%s')!" % filename ) suffix = filename.rsplit(".", 1)[1].lower() if suffix not in SUPPORTED_FILES: raise MetaParsingException("Can't parse file of type '%s'!" % suffix) fp = validator.FieldParser() for key, val in SUPPORTED_FILES[suffix](data).items(): fp.process(key, val) return fp.get_epublication()
def convert_yielded(yielded): """Convert a yielded object into a `.Future`. The default implementation accepts lists, dictionaries, and Futures. If the `~functools.singledispatch` library is available, this function may be extended to support additional types. For example:: @convert_yielded.register(asyncio.Future) def _(asyncio_future): return tornado.platform.asyncio.to_tornado_future(asyncio_future) .. versionadded:: 4.1 """ # Lists and dicts containing YieldPoints were handled earlier. if yielded is None: return moment elif isinstance(yielded, (list, dict)): return multi(yielded) elif is_future(yielded): return yielded elif isawaitable(yielded): return _wrap_awaitable(yielded) else: raise BadYieldError("yielded unknown object %r" % (yielded,))
Convert a yielded object into a `.Future`. The default implementation accepts lists, dictionaries, and Futures. If the `~functools.singledispatch` library is available, this function may be extended to support additional types. For example:: @convert_yielded.register(asyncio.Future) def _(asyncio_future): return tornado.platform.asyncio.to_tornado_future(asyncio_future) .. versionadded:: 4.1
Below is the the instruction that describes the task: ### Input: Convert a yielded object into a `.Future`. The default implementation accepts lists, dictionaries, and Futures. If the `~functools.singledispatch` library is available, this function may be extended to support additional types. For example:: @convert_yielded.register(asyncio.Future) def _(asyncio_future): return tornado.platform.asyncio.to_tornado_future(asyncio_future) .. versionadded:: 4.1 ### Response: def convert_yielded(yielded): """Convert a yielded object into a `.Future`. The default implementation accepts lists, dictionaries, and Futures. If the `~functools.singledispatch` library is available, this function may be extended to support additional types. For example:: @convert_yielded.register(asyncio.Future) def _(asyncio_future): return tornado.platform.asyncio.to_tornado_future(asyncio_future) .. versionadded:: 4.1 """ # Lists and dicts containing YieldPoints were handled earlier. if yielded is None: return moment elif isinstance(yielded, (list, dict)): return multi(yielded) elif is_future(yielded): return yielded elif isawaitable(yielded): return _wrap_awaitable(yielded) else: raise BadYieldError("yielded unknown object %r" % (yielded,))
def save(self): """Save to database if anything has changed since last load""" if ( self._new or (self._validID() and self._changed) or (self._updated and self._changed > self._updated) ): # Don't save if we have not loaded existing data! self._saveDB() return True return False
Save to database if anything has changed since last load
Below is the the instruction that describes the task: ### Input: Save to database if anything has changed since last load ### Response: def save(self): """Save to database if anything has changed since last load""" if ( self._new or (self._validID() and self._changed) or (self._updated and self._changed > self._updated) ): # Don't save if we have not loaded existing data! self._saveDB() return True return False
def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS): """Ensures that flags are not None during program execution. Recommended usage: if __name__ == '__main__': flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) app.run() Args: flag_names: Sequence[str], names of the flags. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. Raises: AttributeError: If any of flag name has not already been defined as a flag. """ for flag_name in flag_names: mark_flag_as_required(flag_name, flag_values)
Ensures that flags are not None during program execution. Recommended usage: if __name__ == '__main__': flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) app.run() Args: flag_names: Sequence[str], names of the flags. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. Raises: AttributeError: If any of flag name has not already been defined as a flag.
Below is the the instruction that describes the task: ### Input: Ensures that flags are not None during program execution. Recommended usage: if __name__ == '__main__': flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) app.run() Args: flag_names: Sequence[str], names of the flags. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. Raises: AttributeError: If any of flag name has not already been defined as a flag. ### Response: def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS): """Ensures that flags are not None during program execution. Recommended usage: if __name__ == '__main__': flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) app.run() Args: flag_names: Sequence[str], names of the flags. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. Raises: AttributeError: If any of flag name has not already been defined as a flag. """ for flag_name in flag_names: mark_flag_as_required(flag_name, flag_values)
def make_common_entry(plist, pyver, suffix, req_ver): """Generate Python interpreter version entries for 2.x or 3.x series.""" prefix = "Python {pyver}.x{suffix}".format(pyver=pyver, suffix=suffix) plist.append("{prefix}{ver}".format(prefix=prefix, ver=ops_to_words(req_ver)))
Generate Python interpreter version entries for 2.x or 3.x series.
Below is the the instruction that describes the task: ### Input: Generate Python interpreter version entries for 2.x or 3.x series. ### Response: def make_common_entry(plist, pyver, suffix, req_ver): """Generate Python interpreter version entries for 2.x or 3.x series.""" prefix = "Python {pyver}.x{suffix}".format(pyver=pyver, suffix=suffix) plist.append("{prefix}{ver}".format(prefix=prefix, ver=ops_to_words(req_ver)))
def to_qubo(self): """Convert a binary quadratic model to QUBO format. If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`, values are converted. Returns: tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict in which keys are pairs of variables and values are the associated linear or quadratic bias and `offset` is a number that represents the constant offset of the binary quadratic model. Examples: This example converts a binary quadratic model with spin variables to QUBO format with binary variables. >>> import dimod >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5}, ... {(0, 1): .5, (1, 2): 1.5}, ... 1.4, ... dimod.SPIN) >>> model.to_qubo() # doctest: +SKIP ({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9) """ qubo = dict(self.binary.quadratic) qubo.update(((v, v), bias) for v, bias in iteritems(self.binary.linear)) return qubo, self.binary.offset
Convert a binary quadratic model to QUBO format. If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`, values are converted. Returns: tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict in which keys are pairs of variables and values are the associated linear or quadratic bias and `offset` is a number that represents the constant offset of the binary quadratic model. Examples: This example converts a binary quadratic model with spin variables to QUBO format with binary variables. >>> import dimod >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5}, ... {(0, 1): .5, (1, 2): 1.5}, ... 1.4, ... dimod.SPIN) >>> model.to_qubo() # doctest: +SKIP ({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9)
Below is the the instruction that describes the task: ### Input: Convert a binary quadratic model to QUBO format. If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`, values are converted. Returns: tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict in which keys are pairs of variables and values are the associated linear or quadratic bias and `offset` is a number that represents the constant offset of the binary quadratic model. Examples: This example converts a binary quadratic model with spin variables to QUBO format with binary variables. >>> import dimod >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5}, ... {(0, 1): .5, (1, 2): 1.5}, ... 1.4, ... dimod.SPIN) >>> model.to_qubo() # doctest: +SKIP ({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9) ### Response: def to_qubo(self): """Convert a binary quadratic model to QUBO format. If the binary quadratic model's vartype is not :class:`.Vartype.BINARY`, values are converted. Returns: tuple: 2-tuple of form (`biases`, `offset`), where `biases` is a dict in which keys are pairs of variables and values are the associated linear or quadratic bias and `offset` is a number that represents the constant offset of the binary quadratic model. Examples: This example converts a binary quadratic model with spin variables to QUBO format with binary variables. >>> import dimod >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5}, ... {(0, 1): .5, (1, 2): 1.5}, ... 1.4, ... dimod.SPIN) >>> model.to_qubo() # doctest: +SKIP ({(0, 0): 1.0, (0, 1): 2.0, (1, 1): -6.0, (1, 2): 6.0, (2, 2): -2.0}, 2.9) """ qubo = dict(self.binary.quadratic) qubo.update(((v, v), bias) for v, bias in iteritems(self.binary.linear)) return qubo, self.binary.offset
def get_option_scope_help_info(self, option_registrations_iter): """Returns an OptionScopeHelpInfo for the options registered with the (args, kwargs) pairs.""" basic_options = [] recursive_options = [] advanced_options = [] # Sort the arguments, so we display the help in alphabetical order. for args, kwargs in sorted(option_registrations_iter): ohi = self.get_option_help_info(args, kwargs) if kwargs.get('advanced'): advanced_options.append(ohi) elif kwargs.get('recursive') and not kwargs.get('recursive_root'): recursive_options.append(ohi) else: basic_options.append(ohi) return OptionScopeHelpInfo(scope=self._scope, basic=basic_options, recursive=recursive_options, advanced=advanced_options)
Returns an OptionScopeHelpInfo for the options registered with the (args, kwargs) pairs.
Below is the the instruction that describes the task: ### Input: Returns an OptionScopeHelpInfo for the options registered with the (args, kwargs) pairs. ### Response: def get_option_scope_help_info(self, option_registrations_iter): """Returns an OptionScopeHelpInfo for the options registered with the (args, kwargs) pairs.""" basic_options = [] recursive_options = [] advanced_options = [] # Sort the arguments, so we display the help in alphabetical order. for args, kwargs in sorted(option_registrations_iter): ohi = self.get_option_help_info(args, kwargs) if kwargs.get('advanced'): advanced_options.append(ohi) elif kwargs.get('recursive') and not kwargs.get('recursive_root'): recursive_options.append(ohi) else: basic_options.append(ohi) return OptionScopeHelpInfo(scope=self._scope, basic=basic_options, recursive=recursive_options, advanced=advanced_options)
def versionadded(reason="", version=""): """ This decorator can be used to insert a "versionadded" directive in your function/class docstring in order to documents the version of the project which adds this new functionality in your library. :param str reason: Reason message which documents the addition in your library (can be omitted). :param str version: Version of your project which adds this feature. If you follow the `Semantic Versioning <https://semver.org/>`_, the version number has the format "MAJOR.MINOR.PATCH", and, in the case of a new functionality, the "PATCH" component should be "0". :return: the decorated function. """ adapter = SphinxAdapter('versionadded', reason=reason, version=version) # noinspection PyUnusedLocal @wrapt.decorator(adapter=adapter) def wrapper(wrapped, instance, args, kwargs): return wrapped(*args, **kwargs) return wrapper
This decorator can be used to insert a "versionadded" directive in your function/class docstring in order to documents the version of the project which adds this new functionality in your library. :param str reason: Reason message which documents the addition in your library (can be omitted). :param str version: Version of your project which adds this feature. If you follow the `Semantic Versioning <https://semver.org/>`_, the version number has the format "MAJOR.MINOR.PATCH", and, in the case of a new functionality, the "PATCH" component should be "0". :return: the decorated function.
Below is the the instruction that describes the task: ### Input: This decorator can be used to insert a "versionadded" directive in your function/class docstring in order to documents the version of the project which adds this new functionality in your library. :param str reason: Reason message which documents the addition in your library (can be omitted). :param str version: Version of your project which adds this feature. If you follow the `Semantic Versioning <https://semver.org/>`_, the version number has the format "MAJOR.MINOR.PATCH", and, in the case of a new functionality, the "PATCH" component should be "0". :return: the decorated function. ### Response: def versionadded(reason="", version=""): """ This decorator can be used to insert a "versionadded" directive in your function/class docstring in order to documents the version of the project which adds this new functionality in your library. :param str reason: Reason message which documents the addition in your library (can be omitted). :param str version: Version of your project which adds this feature. If you follow the `Semantic Versioning <https://semver.org/>`_, the version number has the format "MAJOR.MINOR.PATCH", and, in the case of a new functionality, the "PATCH" component should be "0". :return: the decorated function. """ adapter = SphinxAdapter('versionadded', reason=reason, version=version) # noinspection PyUnusedLocal @wrapt.decorator(adapter=adapter) def wrapper(wrapped, instance, args, kwargs): return wrapped(*args, **kwargs) return wrapper
def render_to_response(self, context=False): """Send file to client.""" f = self.openfile() wrapper = FileWrapper(f) response = HttpResponse(wrapper, content_type=self.content_type()) self.headers(response) return response
Send file to client.
Below is the the instruction that describes the task: ### Input: Send file to client. ### Response: def render_to_response(self, context=False): """Send file to client.""" f = self.openfile() wrapper = FileWrapper(f) response = HttpResponse(wrapper, content_type=self.content_type()) self.headers(response) return response
def search_all(self): '''a "show all" search that doesn't require a query''' # This should be your apis url for a search url = '...' # paginte get is what it sounds like, and what you want for multiple # pages of results results = self._paginate_get(url) if len(results) == 0: bot.info("No container collections found.") sys.exit(1) bot.info("Collections") # Here is how to create a simple table. You of course must parse your # custom result and form the fields in the table to be what you think # are important! rows = [] for result in results: if "containers" in result: for c in result['containers']: rows.append([ c['uri'], c['detail'] ]) bot.table(rows) return rows
a "show all" search that doesn't require a query
Below is the the instruction that describes the task: ### Input: a "show all" search that doesn't require a query ### Response: def search_all(self): '''a "show all" search that doesn't require a query''' # This should be your apis url for a search url = '...' # paginte get is what it sounds like, and what you want for multiple # pages of results results = self._paginate_get(url) if len(results) == 0: bot.info("No container collections found.") sys.exit(1) bot.info("Collections") # Here is how to create a simple table. You of course must parse your # custom result and form the fields in the table to be what you think # are important! rows = [] for result in results: if "containers" in result: for c in result['containers']: rows.append([ c['uri'], c['detail'] ]) bot.table(rows) return rows
def get_client_from_cli_profile(client_class, **kwargs): """Return a SDK client initialized with current CLI credentials, CLI default subscription and CLI default cloud. This method will fill automatically the following client parameters: - credentials - subscription_id - base_url Parameters provided in kwargs will override CLI parameters and be passed directly to the client. :Example: .. code:: python from azure.common.client_factory import get_client_from_cli_profile from azure.mgmt.compute import ComputeManagementClient client = get_client_from_cli_profile(ComputeManagementClient) .. versionadded:: 1.1.6 :param client_class: A SDK client class :return: An instantiated client :raises: ImportError if azure-cli-core package is not available """ cloud = get_cli_active_cloud() parameters = {} if 'credentials' not in kwargs or 'subscription_id' not in kwargs: resource, _ = _client_resource(client_class, cloud) credentials, subscription_id, tenant_id = get_azure_cli_credentials(resource=resource, with_tenant=True) parameters.update({ 'credentials': kwargs.get('credentials', credentials), 'subscription_id': kwargs.get('subscription_id', subscription_id) }) args = get_arg_spec(client_class.__init__).args if 'adla_job_dns_suffix' in args and 'adla_job_dns_suffix' not in kwargs: # Datalake # Let it raise here with AttributeError at worst, this would mean this cloud does not define # ADL endpoint and no manual suffix was given parameters['adla_job_dns_suffix'] = cloud.suffixes.azure_datalake_analytics_catalog_and_job_endpoint elif 'base_url' in args and 'base_url' not in kwargs: _, base_url = _client_resource(client_class, cloud) if base_url: parameters['base_url'] = base_url else: parameters['base_url'] = cloud.endpoints.resource_manager if 'tenant_id' in args and 'tenant_id' not in kwargs: parameters['tenant_id'] = tenant_id parameters.update(kwargs) return _instantiate_client(client_class, **parameters)
Return a SDK client initialized with current CLI credentials, CLI default subscription and CLI default cloud. This method will fill automatically the following client parameters: - credentials - subscription_id - base_url Parameters provided in kwargs will override CLI parameters and be passed directly to the client. :Example: .. code:: python from azure.common.client_factory import get_client_from_cli_profile from azure.mgmt.compute import ComputeManagementClient client = get_client_from_cli_profile(ComputeManagementClient) .. versionadded:: 1.1.6 :param client_class: A SDK client class :return: An instantiated client :raises: ImportError if azure-cli-core package is not available
Below is the the instruction that describes the task: ### Input: Return a SDK client initialized with current CLI credentials, CLI default subscription and CLI default cloud. This method will fill automatically the following client parameters: - credentials - subscription_id - base_url Parameters provided in kwargs will override CLI parameters and be passed directly to the client. :Example: .. code:: python from azure.common.client_factory import get_client_from_cli_profile from azure.mgmt.compute import ComputeManagementClient client = get_client_from_cli_profile(ComputeManagementClient) .. versionadded:: 1.1.6 :param client_class: A SDK client class :return: An instantiated client :raises: ImportError if azure-cli-core package is not available ### Response: def get_client_from_cli_profile(client_class, **kwargs): """Return a SDK client initialized with current CLI credentials, CLI default subscription and CLI default cloud. This method will fill automatically the following client parameters: - credentials - subscription_id - base_url Parameters provided in kwargs will override CLI parameters and be passed directly to the client. :Example: .. code:: python from azure.common.client_factory import get_client_from_cli_profile from azure.mgmt.compute import ComputeManagementClient client = get_client_from_cli_profile(ComputeManagementClient) .. 
versionadded:: 1.1.6 :param client_class: A SDK client class :return: An instantiated client :raises: ImportError if azure-cli-core package is not available """ cloud = get_cli_active_cloud() parameters = {} if 'credentials' not in kwargs or 'subscription_id' not in kwargs: resource, _ = _client_resource(client_class, cloud) credentials, subscription_id, tenant_id = get_azure_cli_credentials(resource=resource, with_tenant=True) parameters.update({ 'credentials': kwargs.get('credentials', credentials), 'subscription_id': kwargs.get('subscription_id', subscription_id) }) args = get_arg_spec(client_class.__init__).args if 'adla_job_dns_suffix' in args and 'adla_job_dns_suffix' not in kwargs: # Datalake # Let it raise here with AttributeError at worst, this would mean this cloud does not define # ADL endpoint and no manual suffix was given parameters['adla_job_dns_suffix'] = cloud.suffixes.azure_datalake_analytics_catalog_and_job_endpoint elif 'base_url' in args and 'base_url' not in kwargs: _, base_url = _client_resource(client_class, cloud) if base_url: parameters['base_url'] = base_url else: parameters['base_url'] = cloud.endpoints.resource_manager if 'tenant_id' in args and 'tenant_id' not in kwargs: parameters['tenant_id'] = tenant_id parameters.update(kwargs) return _instantiate_client(client_class, **parameters)
def initialize_plugin_in_mainwindow_layout(self): """ If this is the first time the plugin is shown, perform actions to initialize plugin position in Spyder's window layout. Use on_first_registration to define the actions to be run by your plugin """ if self.get_option('first_time', True): try: self.on_first_registration() except NotImplementedError: return self.set_option('first_time', False)
If this is the first time the plugin is shown, perform actions to initialize plugin position in Spyder's window layout. Use on_first_registration to define the actions to be run by your plugin
Below is the the instruction that describes the task: ### Input: If this is the first time the plugin is shown, perform actions to initialize plugin position in Spyder's window layout. Use on_first_registration to define the actions to be run by your plugin ### Response: def initialize_plugin_in_mainwindow_layout(self): """ If this is the first time the plugin is shown, perform actions to initialize plugin position in Spyder's window layout. Use on_first_registration to define the actions to be run by your plugin """ if self.get_option('first_time', True): try: self.on_first_registration() except NotImplementedError: return self.set_option('first_time', False)
def sanitize_filename( filename, replacement_text="", platform=None, max_len=_DEFAULT_MAX_FILENAME_LEN ): """Make a valid filename from a string. To make a valid filename the function does: - Replace invalid characters as file names included in the ``filename`` with the ``replacement_text``. Invalid characters are: - unprintable characters - |invalid_filename_chars| - for Windows only: |invalid_win_filename_chars| - Append underscore (``"_"``) at the tail of the name if sanitized name is one of the reserved names by the operating system. Args: filename (str or PathLike object): Filename to sanitize. replacement_text (str, optional): Replacement text for invalid characters. Defaults to ``""``. platform (str, optional): .. include:: platform.txt max_len (int, optional): The upper limit of the ``filename`` length. Truncate the name length if the ``filename`` length exceeds this value. Defaults to ``255``. Returns: Same type as the ``filename`` (str or PathLike object): Sanitized filename. Raises: ValueError: If the ``filename`` is an invalid filename. Example: :ref:`example-sanitize-filename` """ return FileNameSanitizer(platform=platform, max_len=max_len).sanitize( filename, replacement_text )
Make a valid filename from a string. To make a valid filename the function does: - Replace invalid characters as file names included in the ``filename`` with the ``replacement_text``. Invalid characters are: - unprintable characters - |invalid_filename_chars| - for Windows only: |invalid_win_filename_chars| - Append underscore (``"_"``) at the tail of the name if sanitized name is one of the reserved names by the operating system. Args: filename (str or PathLike object): Filename to sanitize. replacement_text (str, optional): Replacement text for invalid characters. Defaults to ``""``. platform (str, optional): .. include:: platform.txt max_len (int, optional): The upper limit of the ``filename`` length. Truncate the name length if the ``filename`` length exceeds this value. Defaults to ``255``. Returns: Same type as the ``filename`` (str or PathLike object): Sanitized filename. Raises: ValueError: If the ``filename`` is an invalid filename. Example: :ref:`example-sanitize-filename`
Below is the the instruction that describes the task: ### Input: Make a valid filename from a string. To make a valid filename the function does: - Replace invalid characters as file names included in the ``filename`` with the ``replacement_text``. Invalid characters are: - unprintable characters - |invalid_filename_chars| - for Windows only: |invalid_win_filename_chars| - Append underscore (``"_"``) at the tail of the name if sanitized name is one of the reserved names by the operating system. Args: filename (str or PathLike object): Filename to sanitize. replacement_text (str, optional): Replacement text for invalid characters. Defaults to ``""``. platform (str, optional): .. include:: platform.txt max_len (int, optional): The upper limit of the ``filename`` length. Truncate the name length if the ``filename`` length exceeds this value. Defaults to ``255``. Returns: Same type as the ``filename`` (str or PathLike object): Sanitized filename. Raises: ValueError: If the ``filename`` is an invalid filename. Example: :ref:`example-sanitize-filename` ### Response: def sanitize_filename( filename, replacement_text="", platform=None, max_len=_DEFAULT_MAX_FILENAME_LEN ): """Make a valid filename from a string. To make a valid filename the function does: - Replace invalid characters as file names included in the ``filename`` with the ``replacement_text``. Invalid characters are: - unprintable characters - |invalid_filename_chars| - for Windows only: |invalid_win_filename_chars| - Append underscore (``"_"``) at the tail of the name if sanitized name is one of the reserved names by the operating system. Args: filename (str or PathLike object): Filename to sanitize. replacement_text (str, optional): Replacement text for invalid characters. Defaults to ``""``. platform (str, optional): .. include:: platform.txt max_len (int, optional): The upper limit of the ``filename`` length. Truncate the name length if the ``filename`` length exceeds this value. Defaults to ``255``. 
Returns: Same type as the ``filename`` (str or PathLike object): Sanitized filename. Raises: ValueError: If the ``filename`` is an invalid filename. Example: :ref:`example-sanitize-filename` """ return FileNameSanitizer(platform=platform, max_len=max_len).sanitize( filename, replacement_text )
def select_as_memdb(self, table_name, columns=None, where=None, extra=None): """ Get data in the database and return fetched data as a in-memory |SimpleSQLite| instance. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as a |SimpleSQLite| instance that connected to in memory database. :rtype: |SimpleSQLite| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| """ table_schema = self.schema_extractor.fetch_table_schema(table_name) memdb = connect_memdb() memdb.create_table_from_tabledata( self.select_as_tabledata(table_name, columns, where, extra), primary_key=table_schema.primary_key, index_attrs=table_schema.index_list, ) return memdb
Get data in the database and return fetched data as a in-memory |SimpleSQLite| instance. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as a |SimpleSQLite| instance that connected to in memory database. :rtype: |SimpleSQLite| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error|
Below is the the instruction that describes the task: ### Input: Get data in the database and return fetched data as a in-memory |SimpleSQLite| instance. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as a |SimpleSQLite| instance that connected to in memory database. :rtype: |SimpleSQLite| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| ### Response: def select_as_memdb(self, table_name, columns=None, where=None, extra=None): """ Get data in the database and return fetched data as a in-memory |SimpleSQLite| instance. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as a |SimpleSQLite| instance that connected to in memory database. :rtype: |SimpleSQLite| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| """ table_schema = self.schema_extractor.fetch_table_schema(table_name) memdb = connect_memdb() memdb.create_table_from_tabledata( self.select_as_tabledata(table_name, columns, where, extra), primary_key=table_schema.primary_key, index_attrs=table_schema.index_list, ) return memdb
def random_walk_graph( self, prev_peer, prev_peer_degree, current_peer, current_peer_neighbors, con=None, path=None, peer_table=None ): """ Take one step from current_peer to a neighbor in current_peer_neighbors, based on Metropolis-Hastings Random Walk with Delayed Acceptance (MHRWDA). The basic idea is to reduce the probability (versus MH alone) that we transition to the previous node. We do so using the Metropolis-Hastings Random Walk with Delated Acceptance (MHRWDA) algorithm described in Lee, Xu, and Eun in SIGMETRICS 2012. Return the next peer. """ if path is None: path = self.atlasdb_path # the "next" current peer ret_current_peer = None ret_current_peer_neighbors = None error_ret = (None, None) current_peer_degree = len(current_peer_neighbors) if current_peer_degree == 0: # nowhere to go log.debug("%s: current peer degree is 0" % (self.my_hostport)) return error_ret next_peer = current_peer_neighbors[ random.randint(0, len(current_peer_neighbors)-1) ] next_peer_neighbors = self.get_neighbors( next_peer, con=con, path=path, peer_table=peer_table ) if next_peer_neighbors is None or len(next_peer_neighbors) == 0: # walk failed, or nowhere to go # restart the walk log.debug("%s: failed to get neighbors of %s" % (self.my_hostport, next_peer)) return error_ret next_peer_degree = len(next_peer_neighbors) p = random.random() if p <= min(1.0, float(current_peer_degree) / float(next_peer_degree)): if prev_peer == next_peer and current_peer_degree > 1: # find a different peer search = current_peer_neighbors[:] if next_peer in search: search.remove(next_peer) alt_peer = search[ random.randint(0, len(search)-1) ] alt_peer_neighbors = self.get_neighbors( alt_peer, con=con, path=path, peer_table=peer_table ) if alt_peer_neighbors is None or len(alt_peer_neighbors) == 0: # walk failed, or nowhere to go # restart the walk log.debug("%s: failed to get neighbors of %s" % (self.my_hostport, alt_peer)) return error_ret alt_peer_degree = len(alt_peer_neighbors) q = 
random.random() if q <= min( 1.0, min( 1.0, (float(current_peer_degree) / float(alt_peer_degree))**2 ), max( 1.0, (float(prev_peer_degree) / float(current_peer_degree))**2 ) ): # go to the alt peer instead ret_current_peer = alt_peer ret_current_peer_neighbors = alt_peer_neighbors else: # go to next peer ret_current_peer = next_peer ret_current_peer_neighbors = next_peer_neighbors else: # go to next peer ret_current_peer = next_peer ret_current_peer_neighbors = next_peer_neighbors else: # stay here ret_current_peer = current_peer ret_current_peer_neighbors = self.get_neighbors( current_peer, con=con, path=path, peer_table=peer_table ) if ret_current_peer_neighbors is None or len(ret_current_peer_neighbors) == 0: # nowhere to go log.debug("%s: failed to refresh %s" % (self.my_hostport, current_peer)) return error_ret return (ret_current_peer, ret_current_peer_neighbors)
Take one step from current_peer to a neighbor in current_peer_neighbors, based on Metropolis-Hastings Random Walk with Delayed Acceptance (MHRWDA). The basic idea is to reduce the probability (versus MH alone) that we transition to the previous node. We do so using the Metropolis-Hastings Random Walk with Delated Acceptance (MHRWDA) algorithm described in Lee, Xu, and Eun in SIGMETRICS 2012. Return the next peer.
Below is the the instruction that describes the task: ### Input: Take one step from current_peer to a neighbor in current_peer_neighbors, based on Metropolis-Hastings Random Walk with Delayed Acceptance (MHRWDA). The basic idea is to reduce the probability (versus MH alone) that we transition to the previous node. We do so using the Metropolis-Hastings Random Walk with Delated Acceptance (MHRWDA) algorithm described in Lee, Xu, and Eun in SIGMETRICS 2012. Return the next peer. ### Response: def random_walk_graph( self, prev_peer, prev_peer_degree, current_peer, current_peer_neighbors, con=None, path=None, peer_table=None ): """ Take one step from current_peer to a neighbor in current_peer_neighbors, based on Metropolis-Hastings Random Walk with Delayed Acceptance (MHRWDA). The basic idea is to reduce the probability (versus MH alone) that we transition to the previous node. We do so using the Metropolis-Hastings Random Walk with Delated Acceptance (MHRWDA) algorithm described in Lee, Xu, and Eun in SIGMETRICS 2012. Return the next peer. 
""" if path is None: path = self.atlasdb_path # the "next" current peer ret_current_peer = None ret_current_peer_neighbors = None error_ret = (None, None) current_peer_degree = len(current_peer_neighbors) if current_peer_degree == 0: # nowhere to go log.debug("%s: current peer degree is 0" % (self.my_hostport)) return error_ret next_peer = current_peer_neighbors[ random.randint(0, len(current_peer_neighbors)-1) ] next_peer_neighbors = self.get_neighbors( next_peer, con=con, path=path, peer_table=peer_table ) if next_peer_neighbors is None or len(next_peer_neighbors) == 0: # walk failed, or nowhere to go # restart the walk log.debug("%s: failed to get neighbors of %s" % (self.my_hostport, next_peer)) return error_ret next_peer_degree = len(next_peer_neighbors) p = random.random() if p <= min(1.0, float(current_peer_degree) / float(next_peer_degree)): if prev_peer == next_peer and current_peer_degree > 1: # find a different peer search = current_peer_neighbors[:] if next_peer in search: search.remove(next_peer) alt_peer = search[ random.randint(0, len(search)-1) ] alt_peer_neighbors = self.get_neighbors( alt_peer, con=con, path=path, peer_table=peer_table ) if alt_peer_neighbors is None or len(alt_peer_neighbors) == 0: # walk failed, or nowhere to go # restart the walk log.debug("%s: failed to get neighbors of %s" % (self.my_hostport, alt_peer)) return error_ret alt_peer_degree = len(alt_peer_neighbors) q = random.random() if q <= min( 1.0, min( 1.0, (float(current_peer_degree) / float(alt_peer_degree))**2 ), max( 1.0, (float(prev_peer_degree) / float(current_peer_degree))**2 ) ): # go to the alt peer instead ret_current_peer = alt_peer ret_current_peer_neighbors = alt_peer_neighbors else: # go to next peer ret_current_peer = next_peer ret_current_peer_neighbors = next_peer_neighbors else: # go to next peer ret_current_peer = next_peer ret_current_peer_neighbors = next_peer_neighbors else: # stay here ret_current_peer = current_peer ret_current_peer_neighbors = 
self.get_neighbors( current_peer, con=con, path=path, peer_table=peer_table ) if ret_current_peer_neighbors is None or len(ret_current_peer_neighbors) == 0: # nowhere to go log.debug("%s: failed to refresh %s" % (self.my_hostport, current_peer)) return error_ret return (ret_current_peer, ret_current_peer_neighbors)
def suggestion_delete(self, account_id): """ Remove the user with the given `account_id` from the follow suggestions. """ account_id = self.__unpack_id(account_id) url = '/api/v1/suggestions/{0}'.format(str(account_id)) self.__api_request('DELETE', url)
Remove the user with the given `account_id` from the follow suggestions.
Below is the the instruction that describes the task: ### Input: Remove the user with the given `account_id` from the follow suggestions. ### Response: def suggestion_delete(self, account_id): """ Remove the user with the given `account_id` from the follow suggestions. """ account_id = self.__unpack_id(account_id) url = '/api/v1/suggestions/{0}'.format(str(account_id)) self.__api_request('DELETE', url)
def get_audio_metadata(fname): """ collects basic MP3 metadata Works, once you use mutagenx (buried deep in issues page) ['Angels'] ['Red Back Fever'] ['Red Back Fever'] {'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']} """ from mutagenx.easyid3 import EasyID3 audio = EasyID3(fname) audio_dict = {} try: artist = audio["artist"] except KeyError: artist = '' try: title = audio["title"] except KeyError: print("Cant get title") try: album = audio["album"] except KeyError: album = '' audio_dict['album'] = album audio_dict['title'] = title audio_dict['artist'] = artist return audio_dict
collects basic MP3 metadata Works, once you use mutagenx (buried deep in issues page) ['Angels'] ['Red Back Fever'] ['Red Back Fever'] {'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']}
Below is the the instruction that describes the task: ### Input: collects basic MP3 metadata Works, once you use mutagenx (buried deep in issues page) ['Angels'] ['Red Back Fever'] ['Red Back Fever'] {'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']} ### Response: def get_audio_metadata(fname): """ collects basic MP3 metadata Works, once you use mutagenx (buried deep in issues page) ['Angels'] ['Red Back Fever'] ['Red Back Fever'] {'album': ['Red Back Fever'], 'title': ['Red Back Fever'], 'artist': ['Angels']} """ from mutagenx.easyid3 import EasyID3 audio = EasyID3(fname) audio_dict = {} try: artist = audio["artist"] except KeyError: artist = '' try: title = audio["title"] except KeyError: print("Cant get title") try: album = audio["album"] except KeyError: album = '' audio_dict['album'] = album audio_dict['title'] = title audio_dict['artist'] = artist return audio_dict
def embed(parent_locals=None, parent_globals=None, exec_lines=None, remove_pyqt_hook=True, N=0): """ Starts interactive session. Similar to keyboard command in matlab. Wrapper around IPython.embed """ import utool as ut from functools import partial import IPython if parent_globals is None: parent_globals = get_parent_frame(N=N).f_globals if parent_locals is None: parent_locals = get_parent_frame(N=N).f_locals stackdepth = N # NOQA getframe = partial(ut.get_parent_frame, N=N) # NOQA # exec(execstr_dict(parent_globals, 'parent_globals')) # exec(execstr_dict(parent_locals, 'parent_locals')) print('') print('================') print(ut.bubbletext('EMBEDDING')) print('================') print('[util] embedding') try: if remove_pyqt_hook: try: import guitool guitool.remove_pyqt_input_hook() except (ImportError, ValueError, AttributeError) as ex: #print(ex) printex(ex, iswarning=True) pass # make qt not loop forever (I had qflag loop forever with this off) except ImportError as ex: print(ex) #from IPython.config.loader import Config # cfg = Config() #config_dict = {} #if exec_lines is not None: # config_dict['exec_lines'] = exec_lines #IPython.embed(**config_dict) print('[util] Get stack location with: ') print('[util] ut.get_parent_frame(N=8).f_code.co_name') print('[util] set EXIT_NOW or qqq to True(ish) to hard exit on unembed') #print('set iup to True to draw plottool stuff') print('[util] call %pylab qt4 to get plottool stuff working') once = True # Allow user to set iup and redo the loop while once or vars().get('iup', False): if not once: # SUPER HACKY WAY OF GETTING FIGURES ON THE SCREEN BETWEEN UPDATES #vars()['iup'] = False # ALL YOU NEED TO DO IS %pylab qt4 print('re-emebeding') #import plottool as pt #pt.update() #(pt.present()) for _ in range(100): time.sleep(.01) once = False #vars().get('iup', False): print('[util] calling IPython.embed()') """ Notes: /usr/local/lib/python2.7/dist-packages/IPython/terminal/embed.py 
IPython.terminal.embed.InteractiveShellEmbed # instance comes from IPython.config.configurable.SingletonConfigurable.instance """ #c = IPython.Config() #c.InteractiveShellApp.exec_lines = [ # '%pylab qt4', # '%gui qt4', # "print 'System Ready!'", #] #IPython.embed(config=c) parent_ns = parent_globals.copy() parent_ns.update(parent_locals) locals().update(parent_ns) try: IPython.embed() except RuntimeError as ex: ut.printex(ex, 'Failed to open ipython') #config = IPython.terminal.ipapp.load_default_config() #config.InteractiveShellEmbed = config.TerminalInteractiveShell #module = sys.modules[parent_globals['__name__']] #config['module'] = module #config['module'] = module #embed2(stack_depth=N + 2 + 1) #IPython.embed(config=config) #IPython.embed(config=config) #IPython.embed(module=module) # Exit python immediately if specifed if vars().get('EXIT_NOW', False) or vars().get('qqq', False): print('[utool.embed] EXIT_NOW specified') sys.exit(1)
Starts interactive session. Similar to keyboard command in matlab. Wrapper around IPython.embed
Below is the the instruction that describes the task: ### Input: Starts interactive session. Similar to keyboard command in matlab. Wrapper around IPython.embed ### Response: def embed(parent_locals=None, parent_globals=None, exec_lines=None, remove_pyqt_hook=True, N=0): """ Starts interactive session. Similar to keyboard command in matlab. Wrapper around IPython.embed """ import utool as ut from functools import partial import IPython if parent_globals is None: parent_globals = get_parent_frame(N=N).f_globals if parent_locals is None: parent_locals = get_parent_frame(N=N).f_locals stackdepth = N # NOQA getframe = partial(ut.get_parent_frame, N=N) # NOQA # exec(execstr_dict(parent_globals, 'parent_globals')) # exec(execstr_dict(parent_locals, 'parent_locals')) print('') print('================') print(ut.bubbletext('EMBEDDING')) print('================') print('[util] embedding') try: if remove_pyqt_hook: try: import guitool guitool.remove_pyqt_input_hook() except (ImportError, ValueError, AttributeError) as ex: #print(ex) printex(ex, iswarning=True) pass # make qt not loop forever (I had qflag loop forever with this off) except ImportError as ex: print(ex) #from IPython.config.loader import Config # cfg = Config() #config_dict = {} #if exec_lines is not None: # config_dict['exec_lines'] = exec_lines #IPython.embed(**config_dict) print('[util] Get stack location with: ') print('[util] ut.get_parent_frame(N=8).f_code.co_name') print('[util] set EXIT_NOW or qqq to True(ish) to hard exit on unembed') #print('set iup to True to draw plottool stuff') print('[util] call %pylab qt4 to get plottool stuff working') once = True # Allow user to set iup and redo the loop while once or vars().get('iup', False): if not once: # SUPER HACKY WAY OF GETTING FIGURES ON THE SCREEN BETWEEN UPDATES #vars()['iup'] = False # ALL YOU NEED TO DO IS %pylab qt4 print('re-emebeding') #import plottool as pt #pt.update() #(pt.present()) for _ in range(100): time.sleep(.01) once = False 
#vars().get('iup', False): print('[util] calling IPython.embed()') """ Notes: /usr/local/lib/python2.7/dist-packages/IPython/terminal/embed.py IPython.terminal.embed.InteractiveShellEmbed # instance comes from IPython.config.configurable.SingletonConfigurable.instance """ #c = IPython.Config() #c.InteractiveShellApp.exec_lines = [ # '%pylab qt4', # '%gui qt4', # "print 'System Ready!'", #] #IPython.embed(config=c) parent_ns = parent_globals.copy() parent_ns.update(parent_locals) locals().update(parent_ns) try: IPython.embed() except RuntimeError as ex: ut.printex(ex, 'Failed to open ipython') #config = IPython.terminal.ipapp.load_default_config() #config.InteractiveShellEmbed = config.TerminalInteractiveShell #module = sys.modules[parent_globals['__name__']] #config['module'] = module #config['module'] = module #embed2(stack_depth=N + 2 + 1) #IPython.embed(config=config) #IPython.embed(config=config) #IPython.embed(module=module) # Exit python immediately if specifed if vars().get('EXIT_NOW', False) or vars().get('qqq', False): print('[utool.embed] EXIT_NOW specified') sys.exit(1)
def list_entity_aliases(self, method='LIST', mount_point=DEFAULT_MOUNT_POINT): """List available entity aliases by their identifiers. :param method: Supported methods: LIST: /{mount_point}/entity-alias/id. Produces: 200 application/json GET: /{mount_point}/entity-alias/id?list=true. Produces: 200 application/json :type method: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The the JSON response of the request. :rtype: dict """ if method == 'LIST': api_path = '/v1/{mount_point}/entity-alias/id'.format(mount_point=mount_point) response = self._adapter.list( url=api_path, ) elif method == 'GET': api_path = '/v1/{mount_point}/entity-alias/id?list=true'.format(mount_point=mount_point) response = self._adapter.get( url=api_path, ) else: error_message = '"method" parameter provided invalid value; LIST or GET allowed, "{method}" provided'.format(method=method) raise exceptions.ParamValidationError(error_message) return response.json()
List available entity aliases by their identifiers. :param method: Supported methods: LIST: /{mount_point}/entity-alias/id. Produces: 200 application/json GET: /{mount_point}/entity-alias/id?list=true. Produces: 200 application/json :type method: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The the JSON response of the request. :rtype: dict
Below is the the instruction that describes the task: ### Input: List available entity aliases by their identifiers. :param method: Supported methods: LIST: /{mount_point}/entity-alias/id. Produces: 200 application/json GET: /{mount_point}/entity-alias/id?list=true. Produces: 200 application/json :type method: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The the JSON response of the request. :rtype: dict ### Response: def list_entity_aliases(self, method='LIST', mount_point=DEFAULT_MOUNT_POINT): """List available entity aliases by their identifiers. :param method: Supported methods: LIST: /{mount_point}/entity-alias/id. Produces: 200 application/json GET: /{mount_point}/entity-alias/id?list=true. Produces: 200 application/json :type method: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The the JSON response of the request. :rtype: dict """ if method == 'LIST': api_path = '/v1/{mount_point}/entity-alias/id'.format(mount_point=mount_point) response = self._adapter.list( url=api_path, ) elif method == 'GET': api_path = '/v1/{mount_point}/entity-alias/id?list=true'.format(mount_point=mount_point) response = self._adapter.get( url=api_path, ) else: error_message = '"method" parameter provided invalid value; LIST or GET allowed, "{method}" provided'.format(method=method) raise exceptions.ParamValidationError(error_message) return response.json()
def store(self, extractions: List[Extraction], attribute: str, group_by_tags: bool = True) -> None:
    """
    Records extractions in the container, and for each individual extraction inserts a
    ProvenanceRecord to record where the extraction is stored.
    Records the "output_segment" in the provenance.

    Extractions are always recorded in a list.

    Errors out if the segment is primitive, such as a string.

    Args:
        extractions (List[Extraction]): the extractions to record in this segment.
        attribute (str): where to store the extractions.
        group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions
            with the same tag will be stored in a list as the value of the corresponding key.
            (if none of the Extractions has 'tag', do not group by tags)

    Returns: None
    """
    # Extractions can only be attached to dict-like segments.
    if not isinstance(self._value, dict):
        raise StoreExtractionError("segment is type: " + str(type(self._value)))
    # Nothing to record.
    if not len(extractions):
        return
    if group_by_tags:
        try:
            # Raises StopIteration when no extraction carries a tag; in that
            # case we fall through to the flat (untagged) storage below.
            next(x for x in extractions if x.tag)
            # if there is at least one extraction with a tag
            if attribute not in self._extractions:
                self._extractions[attribute] = set([])
                self._value[attribute] = {}
            extraction_provenances = {}
            for e in extractions:
                # Untagged extractions in a tagged batch are bucketed under 'NO_TAGS'.
                tag = e.tag if e.tag else 'NO_TAGS'
                if tag not in self.value[attribute]:
                    self.value[attribute][tag] = [e.value]
                else:
                    # De-duplicate values within a tag bucket.
                    if e.value not in self.value[attribute][tag]:
                        self.value[attribute][tag].append(e.value)
                # TODO: handle provenance of non literals
                if isinstance(e.value, Number) or isinstance(e.value, str):
                    extraction_provenances[e.value] = e.prov_id
            self._extractions[attribute] = self._extractions[attribute].union(extractions)
            # Allocate the next provenance id, for provenance hierarchy tracking.
            new_id = self._document.provenance_id_index
            storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path, attribute, extraction_provenances, self.document)
            self._document.provenance_id_index_incrementer()
            self._document.provenances[new_id] = storage_provenance_record
            self.create_storage_provenance(storage_provenance_record)
            return
        except StopIteration:
            # No tagged extraction found -- store as a flat list instead.
            pass
    # Flat (untagged) storage: values go into a de-duplicated list under attribute.
    if attribute not in self._extractions:
        self._extractions[attribute] = set([])
        self._value[attribute] = list()
    self._extractions[attribute] = self._extractions[attribute].union(extractions)
    extraction_provenances = dict()
    for a_extraction in extractions:
        # TODO: handle provenance of non literals
        if isinstance(a_extraction.value, Number) or isinstance(a_extraction.value, str):
            extraction_provenances[a_extraction.value] = a_extraction.prov_id
        if a_extraction.value not in self._value[attribute]:
            self._value[attribute].append(a_extraction.value)
    # Allocate the next provenance id, for provenance hierarchy tracking.
    new_id = self._document.provenance_id_index
    storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path, attribute, extraction_provenances, self.document)
    self._document.provenance_id_index_incrementer()
    self._document.provenances[new_id] = storage_provenance_record
    self.create_storage_provenance(storage_provenance_record)
Records extractions in the container, and for each individual extraction inserts a ProvenanceRecord to record where the extraction is stored. Records the "output_segment" in the provenance. Extractions are always recorded in a list. Errors out if the segment is primitive, such as a string. Args: extractions (List[Extraction]): attribute (str): where to store the extractions. group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions with the same tag will be stored in a list as the value of the corresponding key. (if none of the Extractions has 'tag', do not group by tags) Returns:
Below is the the instruction that describes the task: ### Input: Records extractions in the container, and for each individual extraction inserts a ProvenanceRecord to record where the extraction is stored. Records the "output_segment" in the provenance. Extractions are always recorded in a list. Errors out if the segment is primitive, such as a string. Args: extractions (List[Extraction]): attribute (str): where to store the extractions. group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions with the same tag will be stored in a list as the value of the corresponding key. (if none of the Extractions has 'tag', do not group by tags) Returns: ### Response: def store(self, extractions: List[Extraction], attribute: str, group_by_tags: bool = True) -> None: """ Records extractions in the container, and for each individual extraction inserts a ProvenanceRecord to record where the extraction is stored. Records the "output_segment" in the provenance. Extractions are always recorded in a list. Errors out if the segment is primitive, such as a string. Args: extractions (List[Extraction]): attribute (str): where to store the extractions. group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions with the same tag will be stored in a list as the value of the corresponding key. 
(if none of the Extractions has 'tag', do not group by tags) Returns: """ if not isinstance(self._value, dict): raise StoreExtractionError("segment is type: " + str(type(self._value))) if not len(extractions): return if group_by_tags: try: next(x for x in extractions if x.tag) # if there is at least one extraction with a tag if attribute not in self._extractions: self._extractions[attribute] = set([]) self._value[attribute] = {} extraction_provenances = {} for e in extractions: tag = e.tag if e.tag else 'NO_TAGS' if tag not in self.value[attribute]: self.value[attribute][tag] = [e.value] else: if e.value not in self.value[attribute][tag]: self.value[attribute][tag].append(e.value) # TODO: handle provenance of non literals if isinstance(e.value, Number) or isinstance(e.value, str): extraction_provenances[e.value] = e.prov_id self._extractions[attribute] = self._extractions[attribute].union(extractions) new_id = self._document.provenance_id_index # for the purpose of provenance hierarrchy tracking storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path, attribute, extraction_provenances, self.document) self._document.provenance_id_index_incrementer() self._document.provenances[new_id] = storage_provenance_record self.create_storage_provenance(storage_provenance_record) return except StopIteration: pass if attribute not in self._extractions: self._extractions[attribute] = set([]) self._value[attribute] = list() self._extractions[attribute] = self._extractions[attribute].union(extractions) extraction_provenances = dict() for a_extraction in extractions: # TODO: handle provenance of non literals if isinstance(a_extraction.value, Number) or isinstance(a_extraction.value, str): extraction_provenances[a_extraction.value] = a_extraction.prov_id if a_extraction.value not in self._value[attribute]: self._value[attribute].append(a_extraction.value) new_id = self._document.provenance_id_index # for the purpose of provenance hierarchy 
tracking storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path, attribute, extraction_provenances, self.document) self._document.provenance_id_index_incrementer() self._document.provenances[new_id] = storage_provenance_record self.create_storage_provenance(storage_provenance_record)
def barycentric_to_points(triangles, barycentric):
    """
    Convert a list of barycentric coordinates on a list
    of triangles to cartesian points.

    Parameters
    ------------
    triangles : (n, 3, 3) float
      Triangles in space
    barycentric : (n, 2) float
      Barycentric coordinates

    Returns
    -----------
    points : (m, 3) float
      Points in space
    """
    bary = np.asanyarray(barycentric, dtype=np.float64)
    tri = np.asanyarray(triangles, dtype=np.float64)

    if not util.is_shape(tri, (-1, 3, 3)):
        raise ValueError('Triangles must be (n,3,3)!')

    # A single (2,) coordinate is broadcast across every triangle.
    if bary.shape == (2,):
        bary = np.ones((len(tri), 2), dtype=np.float64) * bary

    if util.is_shape(bary, (len(tri), 2)):
        # Derive the implicit third coordinate so the row sums to one.
        bary = np.column_stack((bary, 1.0 - bary.sum(axis=1)))
    elif not util.is_shape(bary, (len(tri), 3)):
        raise ValueError('Barycentric shape incorrect!')

    # Normalize rows in place, then take the per-triangle weighted vertex sum.
    bary /= bary.sum(axis=1).reshape((-1, 1))
    return (tri * bary.reshape((-1, 3, 1))).sum(axis=1)
Convert a list of barycentric coordinates on a list of triangles to cartesian points. Parameters ------------ triangles : (n, 3, 3) float Triangles in space barycentric : (n, 2) float Barycentric coordinates Returns ----------- points : (m, 3) float Points in space
Below is the the instruction that describes the task: ### Input: Convert a list of barycentric coordinates on a list of triangles to cartesian points. Parameters ------------ triangles : (n, 3, 3) float Triangles in space barycentric : (n, 2) float Barycentric coordinates Returns ----------- points : (m, 3) float Points in space ### Response: def barycentric_to_points(triangles, barycentric): """ Convert a list of barycentric coordinates on a list of triangles to cartesian points. Parameters ------------ triangles : (n, 3, 3) float Triangles in space barycentric : (n, 2) float Barycentric coordinates Returns ----------- points : (m, 3) float Points in space """ barycentric = np.asanyarray(barycentric, dtype=np.float64) triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): raise ValueError('Triangles must be (n,3,3)!') if barycentric.shape == (2,): barycentric = np.ones((len(triangles), 2), dtype=np.float64) * barycentric if util.is_shape(barycentric, (len(triangles), 2)): barycentric = np.column_stack((barycentric, 1.0 - barycentric.sum(axis=1))) elif not util.is_shape(barycentric, (len(triangles), 3)): raise ValueError('Barycentric shape incorrect!') barycentric /= barycentric.sum(axis=1).reshape((-1, 1)) points = (triangles * barycentric.reshape((-1, 3, 1))).sum(axis=1) return points
def get_eod_frame(self):
    """Return the end-of-day market data frame used for pricing."""
    # Assemble closing prices, the market value derived from them,
    # and dividends into a single frame.
    closing = self.pxs.close
    frame = pd.DataFrame({
        'close': closing,
        'mkt_val': self.get_mkt_val(closing),
        'dvds': self.pxs.dvds,
    })
    # Label the index so consumers see a named date axis.
    frame.index.name = 'date'
    return frame
Return the eod market data frame for pricing
Below is the the instruction that describes the task: ### Input: Return the eod market data frame for pricing ### Response: def get_eod_frame(self): """Return the eod market data frame for pricing""" close = self.pxs.close mktval = self.get_mkt_val(close) dvds = self.pxs.dvds df = pd.DataFrame({'close': close, 'mkt_val': mktval, 'dvds': dvds}) df.index.name = 'date' return df
def load_attachments(self, source, skeleton):
    '''Load attachment configuration from the given text source.

    The attachment configuration file has a simple format. After discarding
    Unix-style comments (any part of a line that starts with the pound (#)
    character), each line in the file is then expected to have the following
    format::

        marker-name body-name X Y Z

    The marker name must correspond to an existing "channel" in our marker
    data. The body name must correspond to a rigid body in the skeleton. The
    X, Y, and Z coordinates specify the body-relative offsets where the marker
    should be attached: 0 corresponds to the center of the body along the
    given axis, while -1 and 1 correspond to the minimal (maximal,
    respectively) extent of the body's bounding box along the corresponding
    dimension.

    Parameters
    ----------
    source : str or file-like
        A filename or file-like object that we can use to obtain text
        configuration that describes how markers are attached to skeleton
        bodies.

    skeleton : :class:`pagoda.skeleton.Skeleton`
        The skeleton to attach our marker data to.
    '''
    self.targets = {}
    self.offsets = {}

    # Accept either a path or an already-open file-like object; keep a
    # printable name for log messages either way.
    filename = source
    if isinstance(source, str):
        source = open(source)
    else:
        filename = '(file-{})'.format(id(source))

    for i, line in enumerate(source):
        # Strip unix-style comments, then tokenize on whitespace.
        tokens = line.split('#')[0].strip().split()
        if not tokens:
            continue
        label, tokens = tokens[0], tokens[1:]
        if label not in self.channels:
            logging.info('%s:%d: unknown marker %s', filename, i, label)
            continue
        if not tokens:
            continue
        name, tokens = tokens[0], tokens[1:]
        # The body name must identify exactly one rigid body.
        matches = [body for body in skeleton.bodies if body.name == name]
        if len(matches) != 1:
            logging.info('%s:%d: %d skeleton bodies match %s',
                         filename, i, len(matches), name)
            continue
        body = matches[0]
        # Scale the [-1, 1] offsets to body-relative half-extents.
        offset = np.array([float(t) for t in tokens]) * body.dimensions / 2
        self.targets[label] = body
        self.offsets[label] = offset
        logging.info('%s <--> %s, offset %s', label, body.name, offset)
Load attachment configuration from the given text source. The attachment configuration file has a simple format. After discarding Unix-style comments (any part of a line that starts with the pound (#) character), each line in the file is then expected to have the following format:: marker-name body-name X Y Z The marker name must correspond to an existing "channel" in our marker data. The body name must correspond to a rigid body in the skeleton. The X, Y, and Z coordinates specify the body-relative offsets where the marker should be attached: 0 corresponds to the center of the body along the given axis, while -1 and 1 correspond to the minimal (maximal, respectively) extent of the body's bounding box along the corresponding dimension. Parameters ---------- source : str or file-like A filename or file-like object that we can use to obtain text configuration that describes how markers are attached to skeleton bodies. skeleton : :class:`pagoda.skeleton.Skeleton` The skeleton to attach our marker data to.
Below is the the instruction that describes the task: ### Input: Load attachment configuration from the given text source. The attachment configuration file has a simple format. After discarding Unix-style comments (any part of a line that starts with the pound (#) character), each line in the file is then expected to have the following format:: marker-name body-name X Y Z The marker name must correspond to an existing "channel" in our marker data. The body name must correspond to a rigid body in the skeleton. The X, Y, and Z coordinates specify the body-relative offsets where the marker should be attached: 0 corresponds to the center of the body along the given axis, while -1 and 1 correspond to the minimal (maximal, respectively) extent of the body's bounding box along the corresponding dimension. Parameters ---------- source : str or file-like A filename or file-like object that we can use to obtain text configuration that describes how markers are attached to skeleton bodies. skeleton : :class:`pagoda.skeleton.Skeleton` The skeleton to attach our marker data to. ### Response: def load_attachments(self, source, skeleton): '''Load attachment configuration from the given text source. The attachment configuration file has a simple format. After discarding Unix-style comments (any part of a line that starts with the pound (#) character), each line in the file is then expected to have the following format:: marker-name body-name X Y Z The marker name must correspond to an existing "channel" in our marker data. The body name must correspond to a rigid body in the skeleton. The X, Y, and Z coordinates specify the body-relative offsets where the marker should be attached: 0 corresponds to the center of the body along the given axis, while -1 and 1 correspond to the minimal (maximal, respectively) extent of the body's bounding box along the corresponding dimension. 
Parameters ---------- source : str or file-like A filename or file-like object that we can use to obtain text configuration that describes how markers are attached to skeleton bodies. skeleton : :class:`pagoda.skeleton.Skeleton` The skeleton to attach our marker data to. ''' self.targets = {} self.offsets = {} filename = source if isinstance(source, str): source = open(source) else: filename = '(file-{})'.format(id(source)) for i, line in enumerate(source): tokens = line.split('#')[0].strip().split() if not tokens: continue label = tokens.pop(0) if label not in self.channels: logging.info('%s:%d: unknown marker %s', filename, i, label) continue if not tokens: continue name = tokens.pop(0) bodies = [b for b in skeleton.bodies if b.name == name] if len(bodies) != 1: logging.info('%s:%d: %d skeleton bodies match %s', filename, i, len(bodies), name) continue b = self.targets[label] = bodies[0] o = self.offsets[label] = \ np.array(list(map(float, tokens))) * b.dimensions / 2 logging.info('%s <--> %s, offset %s', label, b.name, o)
def getTraitCovarStdErrors(self,term_i):
    """
    Returns standard errors on trait covariances from term_i (for the
    covariance estimate \see getTraitCovar)

    Args:
        term_i:     index of the term we are interested in

    Returns:
        scalar (P==1) or (P, P) array of standard errors.
    """
    # Both checks guard preconditions: the GP must be trained, and the
    # fast implementation does not expose the Laplace covariance needed here.
    assert self.init, 'GP not initialised'
    assert self.fast==False, 'Not supported for fast implementation'
    if self.P==1:
        # Single-trait case: propagate the parameter variance through the
        # scale**2 parameterization (d(scale^2)/d(scale) = 2*scale).
        out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i]
    else:
        C = self.vd.getTerm(term_i).getTraitCovar()
        n_params = C.getNumberParams()
        # Locate this term's parameter block inside the stacked parameter vector.
        # NOTE(review): the loop body reads `term_i` rather than the loop
        # variable `term`, and iterates range(term_i-1) -- this looks like it
        # should sum the scale counts of each *preceding* term; confirm.
        par_index = 0
        for term in range(term_i-1):
            par_index += self.vd.getTerm(term_i).getNumberScales()
        # Sub-block of the Laplace-approximated parameter covariance for this term.
        Sigma1 = self._getLaplaceCovar()[par_index:(par_index+n_params),:][:,par_index:(par_index+n_params)]
        out = sp.zeros((self.P,self.P))
        # First-order error propagation through the covariance gradients
        # Kgrad_param, accumulating variance and (absolute) cross terms.
        for param_i in range(n_params):
            out += C.Kgrad_param(param_i)**2*Sigma1[param_i,param_i]
            for param_j in range(param_i):
                out += 2*abs(C.Kgrad_param(param_i)*C.Kgrad_param(param_j))*Sigma1[param_i,param_j]
        out = sp.sqrt(out)
    return out
Returns standard errors on trait covariances from term_i (for the covariance estimate \see getTraitCovar) Args: term_i: index of the term we are interested in
Below is the the instruction that describes the task: ### Input: Returns standard errors on trait covariances from term_i (for the covariance estimate \see getTraitCovar) Args: term_i: index of the term we are interested in ### Response: def getTraitCovarStdErrors(self,term_i): """ Returns standard errors on trait covariances from term_i (for the covariance estimate \see getTraitCovar) Args: term_i: index of the term we are interested in """ assert self.init, 'GP not initialised' assert self.fast==False, 'Not supported for fast implementation' if self.P==1: out = (2*self.getScales()[term_i])**2*self._getLaplaceCovar()[term_i,term_i] else: C = self.vd.getTerm(term_i).getTraitCovar() n_params = C.getNumberParams() par_index = 0 for term in range(term_i-1): par_index += self.vd.getTerm(term_i).getNumberScales() Sigma1 = self._getLaplaceCovar()[par_index:(par_index+n_params),:][:,par_index:(par_index+n_params)] out = sp.zeros((self.P,self.P)) for param_i in range(n_params): out += C.Kgrad_param(param_i)**2*Sigma1[param_i,param_i] for param_j in range(param_i): out += 2*abs(C.Kgrad_param(param_i)*C.Kgrad_param(param_j))*Sigma1[param_i,param_j] out = sp.sqrt(out) return out
def format_path(self, repository, namespace=None, rw=False):
    '''format the repository's URL

    :param repository: name of the repository
    :param namespace: namespace of the repository
    :param rw: return a git+ssh URL if true, an https URL otherwise
    :return: the full URI of the repository ready to use as remote

    if namespace is not given, repository is expected to be of format
    `<namespace>/<repository>`.
    '''
    # Normalize to a "<namespace>/<repository>" style path.
    repo = '{}/{}'.format(namespace, repository) if namespace else repository
    # The path must carry at least the configured number of namespace levels.
    nested_enough = repo.count('/') >= self._min_nested_namespaces
    if nested_enough and not rw:
        return '{}/{}'.format(self.url_ro, repo)
    if nested_enough and rw:
        # ssh:// URLs take a path separator; scp-style remotes take a colon.
        if self.url_rw.startswith('ssh://'):
            return '{}/{}'.format(self.url_rw, repo)
        return '{}:{}'.format(self.url_rw, repo)
    raise ArgumentError("Cannot tell how to handle this url: `{}/{}`!".format(namespace, repo))
format the repository's URL :param repository: name of the repository :param namespace: namespace of the repository :param rw: return a git+ssh URL if true, an https URL otherwise :return: the full URI of the repository ready to use as remote if namespace is not given, repository is expected to be of format `<namespace>/<repository>`.
Below is the the instruction that describes the task: ### Input: format the repository's URL :param repository: name of the repository :param namespace: namespace of the repository :param rw: return a git+ssh URL if true, an https URL otherwise :return: the full URI of the repository ready to use as remote if namespace is not given, repository is expected to be of format `<namespace>/<repository>`. ### Response: def format_path(self, repository, namespace=None, rw=False): '''format the repository's URL :param repository: name of the repository :param namespace: namespace of the repository :param rw: return a git+ssh URL if true, an https URL otherwise :return: the full URI of the repository ready to use as remote if namespace is not given, repository is expected to be of format `<namespace>/<repository>`. ''' repo = repository if namespace: repo = '{}/{}'.format(namespace, repository) if not rw and repo.count('/') >= self._min_nested_namespaces: return '{}/{}'.format(self.url_ro, repo) elif rw and repo.count('/') >= self._min_nested_namespaces: if self.url_rw.startswith('ssh://'): return '{}/{}'.format(self.url_rw, repo) else: return '{}:{}'.format(self.url_rw, repo) else: raise ArgumentError("Cannot tell how to handle this url: `{}/{}`!".format(namespace, repo))
def ProtoFromDataFrames(self, dataframes):
    """Creates a feature statistics proto from a set of pandas dataframes.

    Args:
      dataframes: A list of dicts describing tables for each dataset for the
          proto. Each entry contains a 'table' field of the dataframe of the
          data and a 'name' field to identify the dataset in the proto.

    Returns:
      The feature statistics proto for the provided tables.
    """
    datasets = []
    for frame in dataframes:
        table = frame['table']
        # One entry per column, converted through the ndarray serializer.
        entries = {col: self.NdarrayToEntry(table[col]) for col in table}
        datasets.append({
            'entries': entries,
            'size': len(table),
            'name': frame['name'],
        })
    return self.GetDatasetsProto(datasets)
Creates a feature statistics proto from a set of pandas dataframes. Args: dataframes: A list of dicts describing tables for each dataset for the proto. Each entry contains a 'table' field of the dataframe of the data and a 'name' field to identify the dataset in the proto. Returns: The feature statistics proto for the provided tables.
Below is the the instruction that describes the task: ### Input: Creates a feature statistics proto from a set of pandas dataframes. Args: dataframes: A list of dicts describing tables for each dataset for the proto. Each entry contains a 'table' field of the dataframe of the data and a 'name' field to identify the dataset in the proto. Returns: The feature statistics proto for the provided tables. ### Response: def ProtoFromDataFrames(self, dataframes): """Creates a feature statistics proto from a set of pandas dataframes. Args: dataframes: A list of dicts describing tables for each dataset for the proto. Each entry contains a 'table' field of the dataframe of the data and a 'name' field to identify the dataset in the proto. Returns: The feature statistics proto for the provided tables. """ datasets = [] for dataframe in dataframes: table = dataframe['table'] table_entries = {} for col in table: table_entries[col] = self.NdarrayToEntry(table[col]) datasets.append({ 'entries': table_entries, 'size': len(table), 'name': dataframe['name'] }) return self.GetDatasetsProto(datasets)
def count_base_units(units):
    """Returns a dict mapping names of base units to how many times they
    appear in the given iterable of units.  Effectively this counts how
    many length units you have, how many time units, and so forth.
    """
    counts = {}
    for unit in units:
        # Only the base unit matters for counting; the conversion factor
        # returned alongside it is deliberately unused.
        _factor, base_unit = get_conversion_factor(unit)
        counts[base_unit] = counts.get(base_unit, 0) + 1
    return counts
Returns a dict mapping names of base units to how many times they appear in the given iterable of units. Effectively this counts how many length units you have, how many time units, and so forth.
Below is the the instruction that describes the task: ### Input: Returns a dict mapping names of base units to how many times they appear in the given iterable of units. Effectively this counts how many length units you have, how many time units, and so forth. ### Response: def count_base_units(units): """Returns a dict mapping names of base units to how many times they appear in the given iterable of units. Effectively this counts how many length units you have, how many time units, and so forth. """ ret = {} for unit in units: factor, base_unit = get_conversion_factor(unit) ret.setdefault(base_unit, 0) ret[base_unit] += 1 return ret
def get_power(self):
    """Return the power draw per device, converted to watt."""
    readings = self.get_power_all()
    # Values arrive scaled by 1000 (milliwatt); rescale each entry
    # in place so the same dict is returned in watt.
    for device, raw in list(readings.items()):
        readings[device] = float(raw) / 1000.0
    return readings
Returns the Power in Watt
Below is the the instruction that describes the task: ### Input: Returns the Power in Watt ### Response: def get_power(self): """Returns the Power in Watt""" power_dict = self.get_power_all() for device in power_dict.keys(): power_dict[device] = float(power_dict[device]) / 1000.0 return power_dict
def sell_open(id_or_ins, amount, price=None, style=None):
    """
    Sell to open (open a short position).

    :param id_or_ins: the instrument(s) to order
    :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]

    :param int amount: number of lots to order

    :param float price: order price; defaults to None, which means a
        :class:`~MarketOrder`. This parameter mainly exists to simplify the
        `style` parameter.

    :param style: order type, market order by default. Currently supported
        order types are :class:`~LimitOrder` and :class:`~MarketOrder`
    :type style: `OrderStyle` object

    :return: :class:`~Order` object | None
    """
    # Thin wrapper: delegate to the generic order helper with a fixed
    # SELL side and OPEN position effect.
    return order(id_or_ins, amount, SIDE.SELL, POSITION_EFFECT.OPEN, cal_style(price, style))
卖出开仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None
Below is the the instruction that describes the task: ### Input: 卖出开仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None ### Response: def sell_open(id_or_ins, amount, price=None, style=None): """ 卖出开仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None """ return order(id_or_ins, amount, SIDE.SELL, POSITION_EFFECT.OPEN, cal_style(price, style))
def solve_semi_dual_entropic(a, b, M, reg, method, numItermax=10000, lr=None, log=False):
    '''
    Compute the transportation matrix to solve the regularized discrete
    measures optimal transport max problem

    The function solves the following optimization problem:

    .. math::
        \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)

        s.t. \gamma 1 = a

             \gamma^T 1 = b

             \gamma \geq 0

    Where :

    - M is the (ns,nt) metric cost matrix
    - :math:`\Omega` is the entropic regularization term with
      :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - a and b are source and target weights (sum to 1)

    The algorithm used for solving the problem is the SAG or ASGD algorithms
    as proposed in [18]_

    Parameters
    ----------
    a : np.ndarray(ns,)
        source measure
    b : np.ndarray(nt,)
        target measure
    M : np.ndarray(ns, nt)
        cost matrix
    reg : float
        Regularization term > 0
    method : str
        used method (SAG or ASGD)
    numItermax : int
        number of iteration
    lr : float
        learning rate
    log : bool, optional
        record log if True

    Returns
    -------
    pi : np.ndarray(ns, nt)
        transportation matrix
    log : dict
        log dictionary, returned only if log==True in parameters

    Examples
    --------
    >>> n_source = 7
    >>> n_target = 4
    >>> reg = 1
    >>> numItermax = 300000
    >>> a = ot.utils.unif(n_source)
    >>> b = ot.utils.unif(n_target)
    >>> rng = np.random.RandomState(0)
    >>> X_source = rng.randn(n_source, 2)
    >>> Y_target = rng.randn(n_target, 2)
    >>> M = ot.dist(X_source, Y_target)
    >>> method = "ASGD"
    >>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax)
    >>> print(asgd_pi)

    References
    ----------
    [Genevay et al., 2016] :
        Stochastic Optimization for Large-scale Optimal Transport,
        Advances in Neural Information Processing Systems (2016),
        arXiv preprint arxiv:1605.08527.
    '''
    # Optimize the semi-dual variable beta with the requested stochastic solver.
    if method.lower() == "sag":
        opt_beta = sag_entropic_transport(a, b, M, reg, numItermax, lr)
    elif method.lower() == "asgd":
        opt_beta = averaged_sgd_entropic_transport(a, b, M, reg, numItermax, lr)
    else:
        # NOTE(review): an invalid method name is only reported on stdout and
        # the function returns None instead of raising -- confirm callers
        # expect this.
        print("Please, select your method between SAG and ASGD")
        return None
    # Recover alpha from beta via the entropic c-transform.
    opt_alpha = c_transform_entropic(b, M, reg, opt_beta)
    # Closed-form primal transport plan from the dual potentials.
    pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) *
          a[:, None] * b[None, :])
    if log:
        log = {}
        log['alpha'] = opt_alpha
        log['beta'] = opt_beta
        return pi, log
    else:
        return pi
Compute the transportation matrix to solve the regularized discrete measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the SAG or ASGD algorithms as proposed in [18]_ Parameters ---------- a : np.ndarray(ns,) source measure b : np.ndarray(nt,) target measure M : np.ndarray(ns, nt) cost matrix reg : float number Regularization term > 0 methode : str used method (SAG or ASGD) numItermax : int number number of iteration lr : float number learning rate n_source : int number size of the source measure n_target : int number size of the target measure log : bool, optional record log if True Returns ------- pi : np.ndarray(ns, nt) transportation matrix log : dict log dictionary return only if log==True in parameters Examples -------- >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 300000 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> method = "ASGD" >>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax) >>> print(asgd_pi) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527.
Below is the the instruction that describes the task: ### Input: Compute the transportation matrix to solve the regularized discrete measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the SAG or ASGD algorithms as proposed in [18]_ Parameters ---------- a : np.ndarray(ns,) source measure b : np.ndarray(nt,) target measure M : np.ndarray(ns, nt) cost matrix reg : float number Regularization term > 0 methode : str used method (SAG or ASGD) numItermax : int number number of iteration lr : float number learning rate n_source : int number size of the source measure n_target : int number size of the target measure log : bool, optional record log if True Returns ------- pi : np.ndarray(ns, nt) transportation matrix log : dict log dictionary return only if log==True in parameters Examples -------- >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 300000 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> method = "ASGD" >>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax) >>> print(asgd_pi) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. 
### Response: def solve_semi_dual_entropic(a, b, M, reg, method, numItermax=10000, lr=None, log=False): ''' Compute the transportation matrix to solve the regularized discrete measures optimal transport max problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) The algorithm used for solving the problem is the SAG or ASGD algorithms as proposed in [18]_ Parameters ---------- a : np.ndarray(ns,) source measure b : np.ndarray(nt,) target measure M : np.ndarray(ns, nt) cost matrix reg : float number Regularization term > 0 methode : str used method (SAG or ASGD) numItermax : int number number of iteration lr : float number learning rate n_source : int number size of the source measure n_target : int number size of the target measure log : bool, optional record log if True Returns ------- pi : np.ndarray(ns, nt) transportation matrix log : dict log dictionary return only if log==True in parameters Examples -------- >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 300000 >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> method = "ASGD" >>> asgd_pi = stochastic.solve_semi_dual_entropic(a, b, M, reg, method, numItermax) >>> print(asgd_pi) References ---------- [Genevay et al., 2016] : Stochastic Optimization for Large-scale Optimal Transport, Advances in Neural Information Processing Systems (2016), arXiv preprint arxiv:1605.08527. 
''' if method.lower() == "sag": opt_beta = sag_entropic_transport(a, b, M, reg, numItermax, lr) elif method.lower() == "asgd": opt_beta = averaged_sgd_entropic_transport(a, b, M, reg, numItermax, lr) else: print("Please, select your method between SAG and ASGD") return None opt_alpha = c_transform_entropic(b, M, reg, opt_beta) pi = (np.exp((opt_alpha[:, None] + opt_beta[None, :] - M[:, :]) / reg) * a[:, None] * b[None, :]) if log: log = {} log['alpha'] = opt_alpha log['beta'] = opt_beta return pi, log else: return pi
def init_from_wave_file(wavpath):
    """Init a sonic visualiser environment structure based on the analysis
    of the main audio file. The audio file has to be encoded in wave.

    Args:
        wavpath(str): the full path to the wavfile

    Returns:
        SVEnv: environment built from the file's sample rate and frame count.

    Raises:
        Exception: if neither scipy nor the stdlib ``wave`` module can
            decode the file.
    """
    try:
        samplerate, data = SW.read(wavpath)
        nframes = data.shape[0]
    except Exception:
        # scipy cannot handle 24 bit wav files
        # and wave cannot handle 32 bit wav files
        # (was a bare ``except:``, which also swallowed KeyboardInterrupt)
        try:
            w = wave.open(wavpath)
            try:
                samplerate = w.getframerate()
                nframes = w.getnframes()
            finally:
                # close the reader even when a getter raises (was leaked)
                w.close()
        except Exception:
            raise Exception('Cannot decode wavefile ' + wavpath)
    return SVEnv(samplerate, nframes, wavpath)
Init a sonic visualiser environment structure based the analysis of the main audio file. The audio file have to be encoded in wave Args: wavpath(str): the full path to the wavfile
Below is the the instruction that describes the task: ### Input: Init a sonic visualiser environment structure based the analysis of the main audio file. The audio file have to be encoded in wave Args: wavpath(str): the full path to the wavfile ### Response: def init_from_wave_file(wavpath): """Init a sonic visualiser environment structure based the analysis of the main audio file. The audio file have to be encoded in wave Args: wavpath(str): the full path to the wavfile """ try: samplerate, data = SW.read(wavpath) nframes = data.shape[0] except: # scipy cannot handle 24 bit wav files # and wave cannot handle 32 bit wav files try: w = wave.open(wavpath) samplerate = w.getframerate() nframes = w.getnframes() except: raise Exception('Cannot decode wavefile ' + wavpath) return SVEnv(samplerate, nframes, wavpath)
def optional(name, default) -> 'Wildcard':
    """Create a `Wildcard` that matches exactly one argument, with a default.

    When the wildcard does not match, the substitution maps *name* to
    *default* instead of a matched value.

    Args:
        name: The name for the wildcard.
        default: The default value of the wildcard.

    Returns:
        An optional wildcard.
    """
    return Wildcard(
        variable_name=name,
        optional=default,
        min_count=1,
        fixed_size=True,
    )
Create a `Wildcard` that matches a single argument with a default value. If the wildcard does not match, the substitution will contain the default value instead. Args: name: The name for the wildcard. default: The default value of the wildcard. Returns: A n optional wildcard.
Below is the the instruction that describes the task: ### Input: Create a `Wildcard` that matches a single argument with a default value. If the wildcard does not match, the substitution will contain the default value instead. Args: name: The name for the wildcard. default: The default value of the wildcard. Returns: A n optional wildcard. ### Response: def optional(name, default) -> 'Wildcard': """Create a `Wildcard` that matches a single argument with a default value. If the wildcard does not match, the substitution will contain the default value instead. Args: name: The name for the wildcard. default: The default value of the wildcard. Returns: A n optional wildcard. """ return Wildcard(min_count=1, fixed_size=True, variable_name=name, optional=default)
def get_interface_detail_output_interface_if_name(self, **kwargs):
    """Auto Generated Code

    Build the ``get_interface_detail`` request element with the
    interface identification leaves filled in from ``kwargs``, then
    dispatch it through ``callback`` (defaults to ``self._callback``).

    Required kwargs: ``interface_type``, ``interface_name``, ``if_name``.
    Optional kwarg: ``callback`` — a callable receiving the built element.

    :return: whatever the callback returns for the built XML element.
    """
    # The original allocated a throwaway ET.Element("config") that was
    # immediately overwritten; build the request element directly.
    config = ET.Element("get_interface_detail")
    output = ET.SubElement(config, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    if_name = ET.SubElement(interface, "if-name")
    if_name.text = kwargs.pop('if_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def get_interface_detail_output_interface_if_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail output = ET.SubElement(get_interface_detail, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(interface, "interface-name") interface_name_key.text = kwargs.pop('interface_name') if_name = ET.SubElement(interface, "if-name") if_name.text = kwargs.pop('if_name') callback = kwargs.pop('callback', self._callback) return callback(config)
def execute_route(self, meta_data, request_pdu):
    """ Execute configured route based on request meta data and request PDU.

    Looks up the Modbus function encoded in the request, executes it
    against this server's route map, and returns either a normal
    response PDU or a packed exception PDU when anything goes wrong.

    :param meta_data: A dict with meta data. It must at least contain
        key 'unit_id'.
    :param request_pdu: A bytearray containing request PDU.
    :return: A bytearray containing response PDU.
    """
    try:
        function = create_function_from_request_pdu(request_pdu)
        results =\
            function.execute(meta_data['unit_id'], self.route_map)

        try:
            # ReadFunction's use results of callbacks to build response
            # PDU...
            return function.create_response_pdu(results)
        except TypeError:
            # ...other functions don't.
            # NOTE(review): a TypeError raised *inside* a read function's
            # create_response_pdu would also land here and be retried
            # without results -- confirm this is intended.
            return function.create_response_pdu()
    except ModbusError as e:
        # Protocol-level failure: answer with the specific Modbus error code.
        function_code = get_function_code_from_request_pdu(request_pdu)
        return pack_exception_pdu(function_code, e.error_code)
    except Exception as e:
        # Any unexpected failure still yields a well-formed exception PDU
        # ("server device failure") so the client is never left hanging.
        log.exception('Could not handle request: {0}.'.format(e))
        function_code = get_function_code_from_request_pdu(request_pdu)
        return pack_exception_pdu(function_code, ServerDeviceFailureError.error_code)
Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearry containing reponse PDU.
Below is the the instruction that describes the task: ### Input: Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearry containing reponse PDU. ### Response: def execute_route(self, meta_data, request_pdu): """ Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearry containing reponse PDU. """ try: function = create_function_from_request_pdu(request_pdu) results =\ function.execute(meta_data['unit_id'], self.route_map) try: # ReadFunction's use results of callbacks to build response # PDU... return function.create_response_pdu(results) except TypeError: # ...other functions don't. return function.create_response_pdu() except ModbusError as e: function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, e.error_code) except Exception as e: log.exception('Could not handle request: {0}.'.format(e)) function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, ServerDeviceFailureError.error_code)
def _to_enos_roles(roles):
    """Transform the roles to use enoslib.host.Host hosts.

    Args:
        roles (dict): roles returned by
            :py:func:`enoslib.infra.provider.Provider.init`

    Returns:
        dict: same keys, with every host dict converted to a ``Host``.
    """
    def to_host(h):
        # Build extra vars mapping every network role to its nic
        # (network_role -> ethX).  The inner loop variable previously
        # shadowed the outer ``roles`` parameter; renamed for clarity.
        extra = {}
        for nic, nic_roles in h["nics"]:
            for nic_role in nic_roles:
                extra[nic_role] = nic
        return Host(h["host"], user="root", extra=extra)

    enos_roles = {
        role: [to_host(h) for h in hosts]
        for role, hosts in roles.items()
    }
    logger.debug(enos_roles)
    return enos_roles
Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init`
Below is the the instruction that describes the task: ### Input: Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init` ### Response: def _to_enos_roles(roles): """Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init` """ def to_host(h): extra = {} # create extra_vars for the nics # network_role = ethX for nic, roles in h["nics"]: for role in roles: extra[role] = nic return Host(h["host"], user="root", extra=extra) enos_roles = {} for role, hosts in roles.items(): enos_roles[role] = [to_host(h) for h in hosts] logger.debug(enos_roles) return enos_roles
def insert_arguments_into_sql_query(compilation_result, arguments):
    """Bind runtime arguments into a compiled SQL query.

    Args:
        compilation_result: CompilationResult produced by the GraphQL
            compiler; must target the SQL backend.
        arguments: Dict[str, Any], parameter name -> value, for every
            parameter the query expects.

    Returns:
        SQLAlchemy Selectable, an executable SQL query with parameters
        bound.
    """
    if compilation_result.language != SQL_LANGUAGE:
        raise AssertionError(
            u'Unexpected query output language: {}'.format(compilation_result))
    return compilation_result.query.params(**arguments)
Insert the arguments into the compiled SQL query to form a complete query. Args: compilation_result: CompilationResult, compilation result from the GraphQL compiler. arguments: Dict[str, Any], parameter name -> value, for every parameter the query expects. Returns: SQLAlchemy Selectable, a executable SQL query with parameters bound.
Below is the the instruction that describes the task: ### Input: Insert the arguments into the compiled SQL query to form a complete query. Args: compilation_result: CompilationResult, compilation result from the GraphQL compiler. arguments: Dict[str, Any], parameter name -> value, for every parameter the query expects. Returns: SQLAlchemy Selectable, a executable SQL query with parameters bound. ### Response: def insert_arguments_into_sql_query(compilation_result, arguments): """Insert the arguments into the compiled SQL query to form a complete query. Args: compilation_result: CompilationResult, compilation result from the GraphQL compiler. arguments: Dict[str, Any], parameter name -> value, for every parameter the query expects. Returns: SQLAlchemy Selectable, a executable SQL query with parameters bound. """ if compilation_result.language != SQL_LANGUAGE: raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result)) base_query = compilation_result.query return base_query.params(**arguments)
def create_channel(self, channel_name, project_name, dataset_name,
                   channel_type, dtype, startwindow, endwindow,
                   readonly=0, start_time=0, end_time=0,
                   propagate=0, resolution=0, channel_description=''):
    """Create a new channel on the Remote.

    Thin wrapper that forwards every argument, positionally and in
    order, to ``self.resources.create_channel``.

    Arguments:
        channel_name (str): Channel name
        project_name (str): Project name
        dataset_name (str): Dataset name
        channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`)
        dtype (str): The datatype of the channel's data (e.g. `uint8`)
        startwindow (int): Window to start in
        endwindow (int): Window to end in
        readonly (int): Can others write to this channel?
        start_time (int): Start time
        end_time (int): End time
        propagate (int): Allow propagation? 1 is True, 0 is False
        resolution (int): Resolution scaling
        channel_description (str): Your description of the channel

    Returns:
        bool: `True` if successful, `False` otherwise.

    Raises:
        ValueError: If your args were bad :(
        RemoteDataUploadError: If the channel data is valid but upload
            fails for some other reason.
    """
    return self.resources.create_channel(
        channel_name,
        project_name,
        dataset_name,
        channel_type,
        dtype,
        startwindow,
        endwindow,
        readonly,
        start_time,
        end_time,
        propagate,
        resolution,
        channel_description,
    )
Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason.
Below is the the instruction that describes the task: ### Input: Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. ### Response: def create_channel(self, channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly=0, start_time=0, end_time=0, propagate=0, resolution=0, channel_description=''): """ Create a new channel on the Remote, using channel_data. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name channel_type (str): Type of the channel (e.g. `neurodata.IMAGE`) dtype (str): The datatype of the channel's data (e.g. `uint8`) startwindow (int): Window to start in endwindow (int): Window to end in readonly (int): Can others write to this channel? propagate (int): Allow propogation? 1 is True, 0 is False resolution (int): Resolution scaling channel_description (str): Your description of the channel Returns: bool: `True` if successful, `False` otherwise. Raises: ValueError: If your args were bad :( RemoteDataUploadError: If the channel data is valid but upload fails for some other reason. 
""" return self.resources.create_channel(channel_name, project_name, dataset_name, channel_type, dtype, startwindow, endwindow, readonly, start_time, end_time, propagate, resolution, channel_description)
def get_widget_label_for(self, fieldname, default=None):
    """Return the label of the widget registered for *fieldname*.

    Falls back to *default* when no widget is found for the field.
    """
    widget = self.get_widget_for(fieldname)
    return default if widget is None else widget.label
Lookup the widget of the field and return the label
Below is the the instruction that describes the task: ### Input: Lookup the widget of the field and return the label ### Response: def get_widget_label_for(self, fieldname, default=None): """Lookup the widget of the field and return the label """ widget = self.get_widget_for(fieldname) if widget is None: return default return widget.label
def set_monitoring(module):
    """Attach a ``monitoring`` method to *module*."""
    def monitoring(is_monitoring, track_data=None, track_grad=None,
                   track_update=None, track_update_ratio=None):
        """Turn monitoring on or off.

        Any keyword argument that is not None overwrites the matching
        tracking flag on the module; None arguments leave it unchanged.
        """
        module.is_monitoring = is_monitoring
        overrides = (
            ('track_data', track_data),
            ('track_grad', track_grad),
            ('track_update', track_update),
            ('track_update_ratio', track_update_ratio),
        )
        for attr, value in overrides:
            # re-reading the current value when ``value`` is None keeps
            # the original behavior (incl. AttributeError if unset)
            setattr(module, attr,
                    getattr(module, attr) if value is None else value)

    module.monitoring = monitoring
Defines the monitoring method on the module.
Below is the the instruction that describes the task: ### Input: Defines the monitoring method on the module. ### Response: def set_monitoring(module): """ Defines the monitoring method on the module. """ def monitoring(is_monitoring, track_data=None, track_grad=None, track_update=None, track_update_ratio=None): """ Turn monitoring on or off. If any of the keyword arguments are not None, they will be overwritten. """ module.is_monitoring = is_monitoring module.track_data = track_data if track_data is not None else module.track_data module.track_grad = track_grad if track_grad is not None else module.track_grad module.track_update = track_update if track_update is not None else module.track_update module.track_update_ratio = track_update_ratio if track_update_ratio is not None else module.track_update_ratio module.monitoring = monitoring
def submit_and_connect(self, spec):
    """Submit a new skein application, and wait to connect to it.

    If an error occurs before the application connects, the application
    is killed.

    Parameters
    ----------
    spec : ApplicationSpec, str, or dict
        A description of the application to run. Can be an
        ``ApplicationSpec`` object, a path to a yaml/json file, or a
        dictionary description of an application specification.

    Returns
    -------
    app_client : ApplicationClient
    """
    spec = ApplicationSpec._from_any(spec)
    app_id = self.submit(spec)
    try:
        return self.connect(app_id, security=spec.master.security)
    except BaseException:
        # BaseException (not Exception) so the freshly submitted
        # application is also killed on KeyboardInterrupt/SystemExit,
        # then the original error is re-raised unchanged.
        self.kill_application(app_id)
        raise
Submit a new skein application, and wait to connect to it. If an error occurs before the application connects, the application is killed. Parameters ---------- spec : ApplicationSpec, str, or dict A description of the application to run. Can be an ``ApplicationSpec`` object, a path to a yaml/json file, or a dictionary description of an application specification. Returns ------- app_client : ApplicationClient
Below is the the instruction that describes the task: ### Input: Submit a new skein application, and wait to connect to it. If an error occurs before the application connects, the application is killed. Parameters ---------- spec : ApplicationSpec, str, or dict A description of the application to run. Can be an ``ApplicationSpec`` object, a path to a yaml/json file, or a dictionary description of an application specification. Returns ------- app_client : ApplicationClient ### Response: def submit_and_connect(self, spec): """Submit a new skein application, and wait to connect to it. If an error occurs before the application connects, the application is killed. Parameters ---------- spec : ApplicationSpec, str, or dict A description of the application to run. Can be an ``ApplicationSpec`` object, a path to a yaml/json file, or a dictionary description of an application specification. Returns ------- app_client : ApplicationClient """ spec = ApplicationSpec._from_any(spec) app_id = self.submit(spec) try: return self.connect(app_id, security=spec.master.security) except BaseException: self.kill_application(app_id) raise
def recover_and_supervise(recovery_file):
    """Retrieve monitor data from recovery_file and resume monitoring.

    Args:
        recovery_file: path to a JSON file with keys ``monitor_data``,
            ``dependencies`` and ``args``.

    Returns:
        1 when the recovery file cannot be read or parsed; otherwise the
        (implicit None) result of resuming supervision.
    """
    try:
        logging.info("Attempting to recover Supervisor data from " + recovery_file)
        with open(recovery_file) as rf:
            recovery_data = json.load(rf)
        monitor_data = recovery_data['monitor_data']
        dependencies = recovery_data['dependencies']
        args = recovery_data['args']
    except Exception:
        # Was a bare ``except:`` (also caught KeyboardInterrupt) with
        # logging.error; logging.exception keeps the traceback.
        logging.exception("Could not recover monitor data, exiting...")
        return 1
    logging.info("Data successfully loaded, resuming Supervisor")
    supervise_until_complete(monitor_data, dependencies, args, recovery_file)
Retrieve monitor data from recovery_file and resume monitoring
Below is the the instruction that describes the task: ### Input: Retrieve monitor data from recovery_file and resume monitoring ### Response: def recover_and_supervise(recovery_file): """ Retrieve monitor data from recovery_file and resume monitoring """ try: logging.info("Attempting to recover Supervisor data from " + recovery_file) with open(recovery_file) as rf: recovery_data = json.load(rf) monitor_data = recovery_data['monitor_data'] dependencies = recovery_data['dependencies'] args = recovery_data['args'] except: logging.error("Could not recover monitor data, exiting...") return 1 logging.info("Data successfully loaded, resuming Supervisor") supervise_until_complete(monitor_data, dependencies, args, recovery_file)
def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0):
    """
    Set the default value of a data node in the dispatcher.

    The call is *deferred*: nothing happens immediately; the arguments
    are recorded and replayed on the real Dispatcher when this blueprint
    is registered.

    :param data_id:
        Data node id.
    :type data_id: str

    :param value:
        Data node default value.

        .. note:: If `EMPTY` the previous default value is removed.
    :type value: T, optional

    :param initial_dist:
        Initial distance in the ArciDispatch algorithm when the data
        node default value is used.
    :type initial_dist: float, int, optional

    :return:
        Self.
    :rtype: BlueDispatcher
    """
    # NOTE(review): _call_kw receives the full locals() snapshot, which
    # also contains ``self``; presumably _call_kw filters it out before
    # the deferred replay -- confirm against its implementation.
    self.deferred.append(('set_default_value', _call_kw(locals())))
    return self
Set the default value of a data node in the dispatcher. :param data_id: Data node id. :type data_id: str :param value: Data node default value. .. note:: If `EMPTY` the previous default value is removed. :type value: T, optional :param initial_dist: Initial distance in the ArciDispatch algorithm when the data node default value is used. :type initial_dist: float, int, optional :return: Self. :rtype: BlueDispatcher
Below is the the instruction that describes the task: ### Input: Set the default value of a data node in the dispatcher. :param data_id: Data node id. :type data_id: str :param value: Data node default value. .. note:: If `EMPTY` the previous default value is removed. :type value: T, optional :param initial_dist: Initial distance in the ArciDispatch algorithm when the data node default value is used. :type initial_dist: float, int, optional :return: Self. :rtype: BlueDispatcher ### Response: def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0): """ Set the default value of a data node in the dispatcher. :param data_id: Data node id. :type data_id: str :param value: Data node default value. .. note:: If `EMPTY` the previous default value is removed. :type value: T, optional :param initial_dist: Initial distance in the ArciDispatch algorithm when the data node default value is used. :type initial_dist: float, int, optional :return: Self. :rtype: BlueDispatcher """ self.deferred.append(('set_default_value', _call_kw(locals()))) return self
def map_stops(
    feed: "Feed", stop_ids: List[str], stop_style: Dict = STOP_STYLE
):
    """
    Return a Folium map showing the given stops.

    Parameters
    ----------
    feed : Feed
    stop_ids : list
        IDs of stops in ``feed.stops``
    stop_style: dictionary
        Folium CircleMarker parameters to use for styling stops.
        Defaults to the module-level ``STOP_STYLE`` constant; it is only
        read, never mutated.

    Returns
    -------
    folium.Map
        A Folium Map depicting the stops as CircleMarkers.

    Notes
    ------
    - Requires Folium
    """
    import folium as fl

    # Initialize map
    my_map = fl.Map(tiles="cartodbpositron")

    # Create a feature group for the stops and add it to the map
    group = fl.FeatureGroup(name="Stops")

    # Add stops to feature group.
    # fillna("n/a") keeps NaN cells out of the HTML popup; assumes
    # stop_lat/stop_lon are populated for every selected stop, since a
    # missing coordinate would become the string "n/a" -- TODO confirm.
    stops = feed.stops.loc[lambda x: x.stop_id.isin(stop_ids)].fillna("n/a")
    for prop in stops.to_dict(orient="records"):
        # Add stop
        lon = prop["stop_lon"]
        lat = prop["stop_lat"]
        fl.CircleMarker(
            location=[lat, lon],
            popup=fl.Popup(hp.make_html(prop)),
            **stop_style,
        ).add_to(group)

    group.add_to(my_map)

    # Add layer control
    fl.LayerControl().add_to(my_map)

    # Fit map to stop bounds
    bounds = [
        (stops.stop_lat.min(), stops.stop_lon.min()),
        (stops.stop_lat.max(), stops.stop_lon.max()),
    ]
    my_map.fit_bounds(bounds, padding=[1, 1])

    return my_map
Return a Folium map showing the given stops. Parameters ---------- feed : Feed stop_ids : list IDs of trips in ``feed.stops`` stop_style: dictionary Folium CircleMarker parameters to use for styling stops. Returns ------- dictionary A Folium Map depicting the stops as CircleMarkers. Notes ------ - Requires Folium
Below is the the instruction that describes the task: ### Input: Return a Folium map showing the given stops. Parameters ---------- feed : Feed stop_ids : list IDs of trips in ``feed.stops`` stop_style: dictionary Folium CircleMarker parameters to use for styling stops. Returns ------- dictionary A Folium Map depicting the stops as CircleMarkers. Notes ------ - Requires Folium ### Response: def map_stops( feed: "Feed", stop_ids: List[str], stop_style: Dict = STOP_STYLE ): """ Return a Folium map showing the given stops. Parameters ---------- feed : Feed stop_ids : list IDs of trips in ``feed.stops`` stop_style: dictionary Folium CircleMarker parameters to use for styling stops. Returns ------- dictionary A Folium Map depicting the stops as CircleMarkers. Notes ------ - Requires Folium """ import folium as fl # Initialize map my_map = fl.Map(tiles="cartodbpositron") # Create a feature group for the stops and add it to the map group = fl.FeatureGroup(name="Stops") # Add stops to feature group stops = feed.stops.loc[lambda x: x.stop_id.isin(stop_ids)].fillna("n/a") for prop in stops.to_dict(orient="records"): # Add stop lon = prop["stop_lon"] lat = prop["stop_lat"] fl.CircleMarker( location=[lat, lon], popup=fl.Popup(hp.make_html(prop)), **stop_style, ).add_to(group) group.add_to(my_map) # Add layer control fl.LayerControl().add_to(my_map) # Fit map to stop bounds bounds = [ (stops.stop_lat.min(), stops.stop_lon.min()), (stops.stop_lat.max(), stops.stop_lon.max()), ] my_map.fit_bounds(bounds, padding=[1, 1]) return my_map
def _disconnect_temporarily(self, port_v, target=True): """Removes a connection between the current connection and the given port :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port that was connected :param bool target: Whether the connection origin or target should be disconnected """ if target: handle = self._connection_v.to_handle() else: handle = self._connection_v.from_handle() port_v.remove_connected_handle(handle) port_v.tmp_disconnect() self._connection_v.reset_port_for_handle(handle) # Redraw state of port to make hover state visible self._redraw_port(port_v)
Removes a connection between the current connection and the given port :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port that was connected :param bool target: Whether the connection origin or target should be disconnected
Below is the the instruction that describes the task: ### Input: Removes a connection between the current connection and the given port :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port that was connected :param bool target: Whether the connection origin or target should be disconnected ### Response: def _disconnect_temporarily(self, port_v, target=True): """Removes a connection between the current connection and the given port :param rafcon.gui.mygaphas.items.ports.PortView port_v: The port that was connected :param bool target: Whether the connection origin or target should be disconnected """ if target: handle = self._connection_v.to_handle() else: handle = self._connection_v.from_handle() port_v.remove_connected_handle(handle) port_v.tmp_disconnect() self._connection_v.reset_port_for_handle(handle) # Redraw state of port to make hover state visible self._redraw_port(port_v)
def main(args_list=None):
    """
    Script which loads variants and annotates them with overlapping genes.

    Example usage:
        varcode-genes --vcf mutect.vcf \
            --vcf strelka.vcf \
            --maf tcga_brca.maf \
            --variant chr1 498584 C G \
            --json-variants more_variants.json
    """
    print_version_info()
    # Default to the process's own CLI arguments; ``args_list`` exists
    # mainly as an injection hook for tests.
    if args_list is None:
        args_list = sys.argv[1:]
    args = arg_parser.parse_args(args_list)
    variants = variant_collection_from_args(args)
    variants_dataframe = variants.to_dataframe()
    # Lazy %-style formatting defers dataframe rendering until the log
    # record is actually emitted.
    logger.info('\n%s', variants_dataframe)
    if args.output_csv:
        variants_dataframe.to_csv(args.output_csv, index=False)
Script which loads variants and annotates them with overlapping genes. Example usage: varcode-genes --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json
Below is the the instruction that describes the task: ### Input: Script which loads variants and annotates them with overlapping genes. Example usage: varcode-genes --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json ### Response: def main(args_list=None): """ Script which loads variants and annotates them with overlapping genes. Example usage: varcode-genes --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json """ print_version_info() if args_list is None: args_list = sys.argv[1:] args = arg_parser.parse_args(args_list) variants = variant_collection_from_args(args) variants_dataframe = variants.to_dataframe() logger.info('\n%s', variants_dataframe) if args.output_csv: variants_dataframe.to_csv(args.output_csv, index=False)
def create_group_groups(self, description=None, is_public=None, join_level=None, name=None, storage_quota_mb=None):
    """
    Create a group.

    Creates a new group. Groups created using the "/api/v1/groups/"
    endpoint will be community groups.

    All parameters are optional; only the non-None ones are submitted
    as form data.
    """
    # NOTE: the bare triple-quoted strings below are no-op statements
    # emitted by the API generator; each documents the parameter check
    # that follows it.
    path = {}
    data = {}
    params = {}

    # OPTIONAL - name
    """The name of the group"""
    if name is not None:
        data["name"] = name

    # OPTIONAL - description
    """A description of the group"""
    if description is not None:
        data["description"] = description

    # OPTIONAL - is_public
    """whether the group is public (applies only to community groups)"""
    if is_public is not None:
        data["is_public"] = is_public

    # OPTIONAL - join_level
    """no description"""
    if join_level is not None:
        self._validate_enum(join_level, ["parent_context_auto_join", "parent_context_request", "invitation_only"])
        data["join_level"] = join_level

    # OPTIONAL - storage_quota_mb
    """The allowed file storage for the group, in megabytes. This parameter is ignored if the caller does not have the manage_storage_quotas permission."""
    if storage_quota_mb is not None:
        data["storage_quota_mb"] = storage_quota_mb

    self.logger.debug("POST /api/v1/groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/groups".format(**path), data=data, params=params, single_item=True)
Create a group. Creates a new group. Groups created using the "/api/v1/groups/" endpoint will be community groups.
Below is the the instruction that describes the task: ### Input: Create a group. Creates a new group. Groups created using the "/api/v1/groups/" endpoint will be community groups. ### Response: def create_group_groups(self, description=None, is_public=None, join_level=None, name=None, storage_quota_mb=None): """ Create a group. Creates a new group. Groups created using the "/api/v1/groups/" endpoint will be community groups. """ path = {} data = {} params = {} # OPTIONAL - name """The name of the group""" if name is not None: data["name"] = name # OPTIONAL - description """A description of the group""" if description is not None: data["description"] = description # OPTIONAL - is_public """whether the group is public (applies only to community groups)""" if is_public is not None: data["is_public"] = is_public # OPTIONAL - join_level """no description""" if join_level is not None: self._validate_enum(join_level, ["parent_context_auto_join", "parent_context_request", "invitation_only"]) data["join_level"] = join_level # OPTIONAL - storage_quota_mb """The allowed file storage for the group, in megabytes. This parameter is ignored if the caller does not have the manage_storage_quotas permission.""" if storage_quota_mb is not None: data["storage_quota_mb"] = storage_quota_mb self.logger.debug("POST /api/v1/groups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/groups".format(**path), data=data, params=params, single_item=True)
def get_deploy_assets2s3_list(cwd, file="propel.yml"):
    """
    Return the list of all the assets2s3 repos to publish when deploying.

    :param cwd: working directory containing the propel config file
    :param file: name of the config file to read (shadows the builtin,
        kept for backward compatibility with keyword callers)
    :return: list (empty when the config has no "assets2s3" section)
    """
    conf = propel_deploy_config(cwd=cwd, file=file)
    # dict.get replaces the manual ``"assets2s3" in conf`` membership test
    return conf.get("assets2s3", [])
Return the list of all the assets2s3 repo to publish when deploying :param cwd: :param file: :return: list
Below is the the instruction that describes the task: ### Input: Return the list of all the assets2s3 repo to publish when deploying :param cwd: :param file: :return: list ### Response: def get_deploy_assets2s3_list(cwd, file="propel.yml"): """ Return the list of all the assets2s3 repo to publish when deploying :param cwd: :param file: :return: list """ conf = propel_deploy_config(cwd=cwd, file=file) return conf["assets2s3"] if "assets2s3" in conf else []
async def report(self, msg, timeout=5):
    """Report a message to the host manager.

    :param msg: message object forwarded to the host manager
    :param timeout: seconds to wait when connecting, defaults to 5
    :return: whatever the host manager's ``handle`` coroutine returns
    :raises ConnectionError: if the host manager cannot be reached
    """
    try:
        host_manager = await self.env.connect(self.host_manager,
                                              timeout=timeout)
    except Exception as err:
        # A bare ``except:`` here would also swallow asyncio.CancelledError
        # and KeyboardInterrupt; catch only Exception and chain the original
        # cause so the real connection failure is not lost.
        raise ConnectionError("Could not reach host manager ({})."
                              .format(self.host_manager)) from err
    ret = await host_manager.handle(msg)
    return ret
Report message to the host manager.
Below is the the instruction that describes the task: ### Input: Report message to the host manager. ### Response: async def report(self, msg, timeout=5): """Report message to the host manager. """ try: host_manager = await self.env.connect(self.host_manager, timeout=timeout) except: raise ConnectionError("Could not reach host manager ({})." .format(self.host_manager)) ret = await host_manager.handle(msg) return ret
def rot_state(self, x, y):
    """Update the camera orientation from a new pointer position.

    :param x: viewport x pos
    :param y: viewport y pos
    """
    # First event: anchor the reference position so there is no jump.
    if self.last_x is None:
        self.last_x = x
    if self.last_y is None:
        self.last_y = y

    dx = (self.last_x - x) * self.mouse_sensitivity
    dy = (self.last_y - y) * self.mouse_sensitivity
    self.last_x, self.last_y = x, y

    self.yaw -= dx
    self.pitch += dy

    # Clamp pitch so the camera never flips over the poles.
    self.pitch = max(-85.0, min(85.0, self.pitch))

    self._update_yaw_and_pitch()
Set the rotation state of the camera :param x: viewport x pos :param y: viewport y pos
Below is the the instruction that describes the task: ### Input: Set the rotation state of the camera :param x: viewport x pos :param y: viewport y pos ### Response: def rot_state(self, x, y): """ Set the rotation state of the camera :param x: viewport x pos :param y: viewport y pos """ if self.last_x is None: self.last_x = x if self.last_y is None: self.last_y = y x_offset = self.last_x - x y_offset = self.last_y - y self.last_x = x self.last_y = y x_offset *= self.mouse_sensitivity y_offset *= self.mouse_sensitivity self.yaw -= x_offset self.pitch += y_offset if self.pitch > 85.0: self.pitch = 85.0 if self.pitch < -85.0: self.pitch = -85.0 self._update_yaw_and_pitch()
def _get(self):
    '''
    Fetch the configured schema/key value for the user via gsettings.
    Returns False when the user does not exist or the command yields
    no stdout.
    '''
    user = self.USER
    try:
        uid = pwd.getpwnam(user).pw_uid
    except KeyError:
        log.info('User does not exist')
        return False

    cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)]
    # gsettings needs the user's runtime dir to talk to its dconf service.
    environ = {'XDG_RUNTIME_DIR': '/run/user/{0}'.format(uid)}

    result = __salt__['cmd.run_all'](cmd,
                                     runas=user,
                                     env=environ,
                                     python_shell=False)

    if 'stdout' not in result:
        return False
    stdout = result['stdout']
    # gsettings prefixes unsigned values with "uint32 "; strip that marker.
    if 'uint32' in stdout:
        return re.sub('uint32 ', '', stdout)
    return stdout
get the value for user in gsettings
Below is the the instruction that describes the task: ### Input: get the value for user in gsettings ### Response: def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False
def in_generator(self, generator):
    """Context manager: set the given generator as the current generator."""
    previous = self._current_generator
    try:
        # Install the new generator for the duration of the block.
        self._current_generator = generator
        yield
    finally:
        # Always restore the one that was active before, even on error.
        self._current_generator = previous
Context manager: set the given generator as the current generator.
Below is the the instruction that describes the task: ### Input: Context manager: set the given generator as the current generator. ### Response: def in_generator(self, generator): """Context manager: set the given generator as the current generator.""" previous_generator = self._current_generator try: self._current_generator = generator yield finally: self._current_generator = previous_generator
def _replace_event_shape_in_tensorshape(
    input_tensorshape, event_shape_in, event_shape_out):
  """Replaces the event shape dims of a `TensorShape`.

  Args:
    input_tensorshape: a `TensorShape` instance in which to attempt replacing
      event shape.
    event_shape_in: `Tensor` shape representing the event shape expected to
      be present in (rightmost dims of) `tensorshape_in`. Must be compatible
      with the rightmost dims of `tensorshape_in`.
    event_shape_out: `Tensor` shape representing the new event shape, i.e.,
      the replacement of `event_shape_in`,

  Returns:
    output_tensorshape: `TensorShape` with the rightmost `event_shape_in`
      replaced by `event_shape_out`. Might be partially defined, i.e.,
      `TensorShape(None)`.
    is_validated: Python `bool` indicating static validation happened.

  Raises:
    ValueError: if we can determine the event shape portion of
      `tensorshape_in` as well as `event_shape_in` both statically, and they
      are not compatible. "Compatible" here means that they are identical on
      any dims that are not -1 in `event_shape_in`.
  """
  # If either the input's rank or the expected event rank is statically
  # unknown, nothing can be checked or rewritten at graph-construction time.
  event_shape_in_ndims = tensorshape_util.num_elements(event_shape_in.shape)
  if tensorshape_util.rank(
      input_tensorshape) is None or event_shape_in_ndims is None:
    return tf.TensorShape(None), False  # Not is_validated.

  # Split the input shape into leading (batch/sample) dims and trailing
  # event dims; a negative split point means the input is too small.
  input_non_event_ndims = tensorshape_util.rank(
      input_tensorshape) - event_shape_in_ndims
  if input_non_event_ndims < 0:
    raise ValueError(
        'Input has fewer ndims ({}) than event shape ndims ({}).'.format(
            tensorshape_util.rank(input_tensorshape), event_shape_in_ndims))

  input_non_event_tensorshape = input_tensorshape[:input_non_event_ndims]
  input_event_tensorshape = input_tensorshape[input_non_event_ndims:]

  # Check that `input_event_shape_` and `event_shape_in` are compatible in the
  # sense that they have equal entries in any position that isn't a `-1` in
  # `event_shape_in`. Note that our validations at construction time ensure
  # there is at most one such entry in `event_shape_in`.
  event_shape_in_ = tf.get_static_value(event_shape_in)
  is_validated = (
      tensorshape_util.is_fully_defined(input_event_tensorshape) and
      event_shape_in_ is not None)
  if is_validated:
    input_event_shape_ = np.int32(input_event_tensorshape)
    # Only positions with a concrete (non -1) expected size take part in the
    # comparison; -1 behaves as a wildcard.
    mask = event_shape_in_ >= 0
    explicit_input_event_shape_ = input_event_shape_[mask]
    explicit_event_shape_in_ = event_shape_in_[mask]
    if not all(explicit_input_event_shape_ == explicit_event_shape_in_):
      raise ValueError(
          'Input `event_shape` does not match `event_shape_in`. '
          '({} vs {}).'.format(input_event_shape_, event_shape_in_))

  event_tensorshape_out = tensorshape_util.constant_value_as_shape(
      event_shape_out)
  # If the requested output event shape has statically unknown rank, the
  # overall result shape is fully unknown; otherwise re-attach the leading
  # non-event dims in front of the new event dims.
  if tensorshape_util.rank(event_tensorshape_out) is None:
    output_tensorshape = tf.TensorShape(None)
  else:
    output_tensorshape = tensorshape_util.concatenate(
        input_non_event_tensorshape, event_tensorshape_out)

  return output_tensorshape, is_validated
Replaces the event shape dims of a `TensorShape`. Args: input_tensorshape: a `TensorShape` instance in which to attempt replacing event shape. event_shape_in: `Tensor` shape representing the event shape expected to be present in (rightmost dims of) `tensorshape_in`. Must be compatible with the rightmost dims of `tensorshape_in`. event_shape_out: `Tensor` shape representing the new event shape, i.e., the replacement of `event_shape_in`, Returns: output_tensorshape: `TensorShape` with the rightmost `event_shape_in` replaced by `event_shape_out`. Might be partially defined, i.e., `TensorShape(None)`. is_validated: Python `bool` indicating static validation happened. Raises: ValueError: if we can determine the event shape portion of `tensorshape_in` as well as `event_shape_in` both statically, and they are not compatible. "Compatible" here means that they are identical on any dims that are not -1 in `event_shape_in`.
Below is the the instruction that describes the task: ### Input: Replaces the event shape dims of a `TensorShape`. Args: input_tensorshape: a `TensorShape` instance in which to attempt replacing event shape. event_shape_in: `Tensor` shape representing the event shape expected to be present in (rightmost dims of) `tensorshape_in`. Must be compatible with the rightmost dims of `tensorshape_in`. event_shape_out: `Tensor` shape representing the new event shape, i.e., the replacement of `event_shape_in`, Returns: output_tensorshape: `TensorShape` with the rightmost `event_shape_in` replaced by `event_shape_out`. Might be partially defined, i.e., `TensorShape(None)`. is_validated: Python `bool` indicating static validation happened. Raises: ValueError: if we can determine the event shape portion of `tensorshape_in` as well as `event_shape_in` both statically, and they are not compatible. "Compatible" here means that they are identical on any dims that are not -1 in `event_shape_in`. ### Response: def _replace_event_shape_in_tensorshape( input_tensorshape, event_shape_in, event_shape_out): """Replaces the event shape dims of a `TensorShape`. Args: input_tensorshape: a `TensorShape` instance in which to attempt replacing event shape. event_shape_in: `Tensor` shape representing the event shape expected to be present in (rightmost dims of) `tensorshape_in`. Must be compatible with the rightmost dims of `tensorshape_in`. event_shape_out: `Tensor` shape representing the new event shape, i.e., the replacement of `event_shape_in`, Returns: output_tensorshape: `TensorShape` with the rightmost `event_shape_in` replaced by `event_shape_out`. Might be partially defined, i.e., `TensorShape(None)`. is_validated: Python `bool` indicating static validation happened. Raises: ValueError: if we can determine the event shape portion of `tensorshape_in` as well as `event_shape_in` both statically, and they are not compatible. 
"Compatible" here means that they are identical on any dims that are not -1 in `event_shape_in`. """ event_shape_in_ndims = tensorshape_util.num_elements(event_shape_in.shape) if tensorshape_util.rank( input_tensorshape) is None or event_shape_in_ndims is None: return tf.TensorShape(None), False # Not is_validated. input_non_event_ndims = tensorshape_util.rank( input_tensorshape) - event_shape_in_ndims if input_non_event_ndims < 0: raise ValueError( 'Input has fewer ndims ({}) than event shape ndims ({}).'.format( tensorshape_util.rank(input_tensorshape), event_shape_in_ndims)) input_non_event_tensorshape = input_tensorshape[:input_non_event_ndims] input_event_tensorshape = input_tensorshape[input_non_event_ndims:] # Check that `input_event_shape_` and `event_shape_in` are compatible in the # sense that they have equal entries in any position that isn't a `-1` in # `event_shape_in`. Note that our validations at construction time ensure # there is at most one such entry in `event_shape_in`. event_shape_in_ = tf.get_static_value(event_shape_in) is_validated = ( tensorshape_util.is_fully_defined(input_event_tensorshape) and event_shape_in_ is not None) if is_validated: input_event_shape_ = np.int32(input_event_tensorshape) mask = event_shape_in_ >= 0 explicit_input_event_shape_ = input_event_shape_[mask] explicit_event_shape_in_ = event_shape_in_[mask] if not all(explicit_input_event_shape_ == explicit_event_shape_in_): raise ValueError( 'Input `event_shape` does not match `event_shape_in`. ' '({} vs {}).'.format(input_event_shape_, event_shape_in_)) event_tensorshape_out = tensorshape_util.constant_value_as_shape( event_shape_out) if tensorshape_util.rank(event_tensorshape_out) is None: output_tensorshape = tf.TensorShape(None) else: output_tensorshape = tensorshape_util.concatenate( input_non_event_tensorshape, event_tensorshape_out) return output_tensorshape, is_validated
def ns(self, prefix, tag):
    """Return the namespace-qualified name for ``tag``.

    Looks the prefix up in ``self.prefixes`` and builds a ``QName`` so
    the tag serializes with proper namespace handling on output.
    """
    namespace_uri = self.prefixes[prefix]
    return etree.QName(namespace_uri, tag)
Given a prefix and an XML tag, output the qualified name for proper namespace handling on output.
Below is the the instruction that describes the task: ### Input: Given a prefix and an XML tag, output the qualified name for proper namespace handling on output. ### Response: def ns(self, prefix, tag): """ Given a prefix and an XML tag, output the qualified name for proper namespace handling on output. """ return etree.QName(self.prefixes[prefix], tag)
def GetChildClassId(self, classId):
    """Return the child objects whose classId equals the given one.

    The comparison is case-insensitive; an empty list is returned when
    nothing matches.
    """
    wanted = classId.lower()
    return [ch for ch in self.child if ch.classId.lower() == wanted]
Method extracts and returns the child object list same as the given classId
Below is the the instruction that describes the task: ### Input: Method extracts and returns the child object list same as the given classId ### Response: def GetChildClassId(self, classId): """ Method extracts and returns the child object list same as the given classId """ childList = [] for ch in self.child: if ch.classId.lower() == classId.lower(): childList.append(ch) return childList
def asset_ver_sel_changed(self, index):
    """Callback for when the version selection has changed.

    Emits the asset_taskfile_sel_changed signal with the selected
    taskfile, or None when the index is invalid.

    :param index: the selected index
    :type index: QtCore.QModelIndex
    :returns: None
    :rtype: None
    :raises: None
    """
    if index.isValid():
        taskfile = index.internalPointer().internal_data()
    else:
        taskfile = None
    self.asset_taskfile_sel_changed.emit(taskfile)
Callback for when the version selection has changed Emit asset_taskfile_sel_changed signal. :param index: the selected index :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None
Below is the the instruction that describes the task: ### Input: Callback for when the version selection has changed Emit asset_taskfile_sel_changed signal. :param index: the selected index :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None ### Response: def asset_ver_sel_changed(self, index): """Callback for when the version selection has changed Emit asset_taskfile_sel_changed signal. :param index: the selected index :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None """ taskfile = None if index.isValid(): item = index.internalPointer() taskfile = item.internal_data() self.asset_taskfile_sel_changed.emit(taskfile)
def request(self, uri, method=GET, headers=None, cookies=None, params=None,
            data=None, post_files=None, **kwargs):
    """Makes a request using requests

    @param uri: The uri to send request
    @param method: Method to use to send request
    @param headers: Any headers to send with request
    @param cookies: Request cookies (in addition to session cookies)
    @param params: Request parameters
    @param data: Request data
    @param kwargs: other options to pass to underlying request
    @rtype: requests.Response
    @return: The response
    """
    request_kwargs = {
        'headers': headers,
        'cookies': cookies,
        'params': params,
        'files': post_files,
        'data': data,
        'verify': self.verify_certificates,
    }
    request_kwargs.update(kwargs)

    # Dispatch table replaces the if/elif chain; anything unrecognized
    # falls back to GET, matching the previous default branch.
    senders = {
        self.POST: self.session.post,
        self.PUT: self.session.put,
        self.PATCH: self.session.patch,
        self.DELETE: self.session.delete,
    }
    send = senders.get(method, self.session.get)
    response = send(uri, **request_kwargs)

    self.responses.append(response)
    # Keep the response history bounded.
    while len(self.responses) > self.max_response_history:
        self.responses.popleft()
    return response
Makes a request using requests @param uri: The uri to send request @param method: Method to use to send request @param headers: Any headers to send with request @param cookies: Request cookies (in addition to session cookies) @param params: Request parameters @param data: Request data @param kwargs: other options to pass to underlying request @rtype: requests.Response @return: The response
Below is the the instruction that describes the task: ### Input: Makes a request using requests @param uri: The uri to send request @param method: Method to use to send request @param headers: Any headers to send with request @param cookies: Request cookies (in addition to session cookies) @param params: Request parameters @param data: Request data @param kwargs: other options to pass to underlying request @rtype: requests.Response @return: The response ### Response: def request(self, uri, method=GET, headers=None, cookies=None, params=None, data=None, post_files=None,**kwargs): """Makes a request using requests @param uri: The uri to send request @param method: Method to use to send request @param headers: Any headers to send with request @param cookies: Request cookies (in addition to session cookies) @param params: Request parameters @param data: Request data @param kwargs: other options to pass to underlying request @rtype: requests.Response @return: The response """ coyote_args = { 'headers': headers, 'cookies': cookies, 'params': params, 'files': post_files, 'data': data, 'verify': self.verify_certificates, } coyote_args.update(kwargs) if method == self.POST: response = self.session.post(uri, **coyote_args) elif method == self.PUT: response = self.session.put(uri, **coyote_args) elif method == self.PATCH: response = self.session.patch(uri, **coyote_args) elif method == self.DELETE: response = self.session.delete(uri, **coyote_args) else: # Default to GET response = self.session.get(uri, **coyote_args) self.responses.append(response) while len(self.responses) > self.max_response_history: self.responses.popleft() return response
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Loads the compiled module's code object and scans its symbols for
    references (``__file__``, ``__path__``, ``inspect.*`` source lookups,
    ``python -m`` style mains) that break when running from a zipped egg.
    Returns True when the module looks zip-safe, False otherwise.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir)+1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    # Size of the .pyc header preceding the marshalled code object:
    #   < 3.3  : magic + mtime                        (8 bytes)
    #   3.3-3.6: magic + mtime + source size          (12 bytes)
    #   >= 3.7 : magic + flags + mtime + source size  (16 bytes, PEP 552)
    if sys.version_info >= (3, 7):
        skip = 16
    elif sys.version_info >= (3, 3):
        skip = 12
    else:
        skip = 8
    with open(filename, 'rb') as f:
        f.read(skip)  # discard the header; the code object follows
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # NOTE: the original list was missing a comma between 'getfile' and
        # 'getsourcelines', silently checking for 'getfilegetsourcelines'
        # instead of either name; both are now checked.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3] == "2.4":  # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
Check whether module possibly uses unsafe-for-zipfile stuff
Below is the the instruction that describes the task: ### Input: Check whether module possibly uses unsafe-for-zipfile stuff ### Response: def scan_module(egg_dir, base, name, stubs): """Check whether module possibly uses unsafe-for-zipfile stuff""" filename = os.path.join(base,name) if filename[:-1] in stubs: return True # Extension module pkg = base[len(egg_dir)+1:].replace(os.sep,'.') module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0] if sys.version_info < (3, 3): skip = 8 # skip magic & date else: skip = 12 # skip magic & date & file size f = open(filename,'rb'); f.read(skip) code = marshal.load(f); f.close() safe = True symbols = dict.fromkeys(iter_symbols(code)) for bad in ['__file__', '__path__']: if bad in symbols: log.warn("%s: module references %s", module, bad) safe = False if 'inspect' in symbols: for bad in [ 'getsource', 'getabsfile', 'getsourcefile', 'getfile' 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', 'getinnerframes', 'getouterframes', 'stack', 'trace' ]: if bad in symbols: log.warn("%s: module MAY be using inspect.%s", module, bad) safe = False if '__name__' in symbols and '__main__' in symbols and '.' not in module: if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5 log.warn("%s: top-level module may be 'python -m' script", module) safe = False return safe
def parse_get_tmpl(prs, conn):
    """Register the ``tmpl_get`` sub-command for retrieving templates.

    Arguments:

        prs:  parser object of argparse
        conn: dictionary of connection information
    """
    subparser = prs.add_parser(
        'tmpl_get',
        help='retrieve templates')
    set_option(subparser, 'template')
    conn_options(subparser, conn)
    # Running the sub-command invokes retrieve_tmpl.
    subparser.set_defaults(func=retrieve_tmpl)
Retrieve template. Arguments: prs: parser object of argparse conn: dictionary of connection information
Below is the the instruction that describes the task: ### Input: Retrieve template. Arguments: prs: parser object of argparse conn: dictionary of connection information ### Response: def parse_get_tmpl(prs, conn): """Retrieve template. Arguments: prs: parser object of argparse conn: dictionary of connection information """ prs_tmpl_get = prs.add_parser( 'tmpl_get', help='retrieve templates') set_option(prs_tmpl_get, 'template') conn_options(prs_tmpl_get, conn) prs_tmpl_get.set_defaults(func=retrieve_tmpl)
def register_callback(self, fun, npts):
    """Register a function to run each time ``npts`` samples are collected.

    :param fun: callable invoked with the data buffer that was read
    :type fun: function
    :param npts: number of data points collected between invocations
    :type npts: int
    """
    self.n = npts
    self.callback_fun = fun
    # Hook the driver's every-N-samples event to our private handler.
    self.AutoRegisterEveryNSamplesEvent(
        DAQmx_Val_Acquired_Into_Buffer, npts, 0, name=u"_run_callback")
Provide a function to be executed periodically on data collection, every time after the specified number of points are collected. :param fun: the function that gets called, it must have a single positional argument that will be the data buffer read :type fun: function :param npts: The number of data points collected before the function is called. :type npts: int
Below is the the instruction that describes the task: ### Input: Provide a function to be executed periodically on data collection, every time after the specified number of points are collected. :param fun: the function that gets called, it must have a single positional argument that will be the data buffer read :type fun: function :param npts: The number of data points collected before the function is called. :type npts: int ### Response: def register_callback(self, fun, npts): """ Provide a function to be executed periodically on data collection, every time after the specified number of points are collected. :param fun: the function that gets called, it must have a single positional argument that will be the data buffer read :type fun: function :param npts: The number of data points collected before the function is called. :type npts: int """ self.callback_fun = fun self.n = npts self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, npts, 0, name=u"_run_callback")
def tempdir(fun):
    '''For use as a decorator of instance methods - creates a temporary
    dir named self._tempdir and then deletes it after the method runs.

    :param fun: function to decorate
    :type fun: instance method
    :return: the wrapped method
    '''
    import functools

    @functools.wraps(fun)  # preserve the decorated method's metadata
    def wrapper(*args, **kwargs):
        self = args[0]
        # Drop any stale directory left behind by a previous (crashed) run.
        if os.path.isdir(self._tempdir):
            shutil.rmtree(self._tempdir)
        self._tempdir = tempfile.mkdtemp()
        try:
            # The temporary dir is removed whether or not the method raises.
            return fun(*args, **kwargs)
        finally:
            # Guarded so a method that already removed the dir itself does
            # not make cleanup raise; this also replaces the redundant
            # second rmtree the original performed after the try/finally.
            if os.path.isdir(self._tempdir):
                shutil.rmtree(self._tempdir)
    return wrapper
For use as a decorator of instance methods - creates a temporary dir named self._tempdir and then deletes it after the method runs. :param fun: function to decorate :type fun: instance method
Below is the the instruction that describes the task: ### Input: For use as a decorator of instance methods - creates a temporary dir named self._tempdir and then deletes it after the method runs. :param fun: function to decorate :type fun: instance method ### Response: def tempdir(fun): '''For use as a decorator of instance methods - creates a temporary dir named self._tempdir and then deletes it after the method runs. :param fun: function to decorate :type fun: instance method ''' def wrapper(*args, **kwargs): self = args[0] if os.path.isdir(self._tempdir): shutil.rmtree(self._tempdir) self._tempdir = tempfile.mkdtemp() # If the method raises an exception, delete the temporary dir try: retval = fun(*args, **kwargs) finally: shutil.rmtree(self._tempdir) if os.path.isdir(self._tempdir): shutil.rmtree(self._tempdir) return retval return wrapper
def _connect_client(self):
    """Coroutine: wait until a client connects to this named pipe."""
    ov = OVERLAPPED()
    ov.hEvent = create_event()

    while True:
        # A non-zero return means the client connected synchronously.
        if windll.kernel32.ConnectNamedPipe(self.pipe_handle, byref(ov)):
            return

        error_code = windll.kernel32.GetLastError()
        if error_code != ERROR_IO_PENDING:
            raise Exception('connect failed with error code' + str(error_code))

        # Overlapped connect is in flight: wait for its event to signal.
        yield From(wait_for_event(ov.hEvent))
        # XXX: Call GetOverlappedResult.
        return  # Connection succeeded.
Wait for a client to connect to this pipe.
Below is the the instruction that describes the task: ### Input: Wait for a client to connect to this pipe. ### Response: def _connect_client(self): """ Wait for a client to connect to this pipe. """ overlapped = OVERLAPPED() overlapped.hEvent = create_event() while True: success = windll.kernel32.ConnectNamedPipe( self.pipe_handle, byref(overlapped)) if success: return last_error = windll.kernel32.GetLastError() if last_error == ERROR_IO_PENDING: yield From(wait_for_event(overlapped.hEvent)) # XXX: Call GetOverlappedResult. return # Connection succeeded. else: raise Exception('connect failed with error code' + str(last_error))
def install(module):
    '''
    Install a Perl module from CPAN

    CLI Example:

    .. code-block:: bash

        salt '*' cpan.install Template::Alloy
    '''
    ret = {
        'old': None,
        'new': None,
    }

    old_info = show(module)

    # NOTE(review): `module` is interpolated into a shell command line;
    # callers are expected to pass trusted module names.
    cmd = 'cpan -i {0}'.format(module)
    out = __salt__['cmd.run'](cmd)

    if "don't know what it is" in out:
        ret['error'] = 'CPAN cannot identify this package'
        return ret

    new_info = show(module)
    # Use .get() for both lookups so a module that reports no installed
    # version (e.g. a failed build) cannot raise KeyError here; the 'old'
    # lookup already did this, the 'new' one did not.
    ret['old'] = old_info.get('installed version', None)
    ret['new'] = new_info.get('installed version', None)

    return ret
Install a Perl module from CPAN CLI Example: .. code-block:: bash salt '*' cpan.install Template::Alloy
Below is the the instruction that describes the task: ### Input: Install a Perl module from CPAN CLI Example: .. code-block:: bash salt '*' cpan.install Template::Alloy ### Response: def install(module): ''' Install a Perl module from CPAN CLI Example: .. code-block:: bash salt '*' cpan.install Template::Alloy ''' ret = { 'old': None, 'new': None, } old_info = show(module) cmd = 'cpan -i {0}'.format(module) out = __salt__['cmd.run'](cmd) if "don't know what it is" in out: ret['error'] = 'CPAN cannot identify this package' return ret new_info = show(module) ret['old'] = old_info.get('installed version', None) ret['new'] = new_info['installed version'] return ret
def rolling_window_sequences(X, index, window_size, target_size, target_column):
    """Create rolling window sequences out of timeseries data.

    For every valid window start, collects a `window_size`-row slice of `X`,
    the following `target_size` values of the target column, and the index
    values marking the start of the window and of the target.
    """
    target = X[:, target_column]
    num_windows = len(X) - window_size - target_size + 1
    starts = range(num_windows)

    windows = [X[s:s + window_size] for s in starts]
    targets = [target[s + window_size:s + window_size + target_size]
               for s in starts]
    window_index = [index[s] for s in starts]
    target_index = [index[s + window_size] for s in starts]

    return (np.asarray(windows), np.asarray(targets),
            np.asarray(window_index), np.asarray(target_index))
Create rolling window sequences out of timeseries data.
Below is the the instruction that describes the task: ### Input: Create rolling window sequences out of timeseries data. ### Response: def rolling_window_sequences(X, index, window_size, target_size, target_column): """Create rolling window sequences out of timeseries data.""" out_X = list() out_y = list() X_index = list() y_index = list() target = X[:, target_column] for start in range(len(X) - window_size - target_size + 1): end = start + window_size out_X.append(X[start:end]) out_y.append(target[end:end + target_size]) X_index.append(index[start]) y_index.append(index[end]) return np.asarray(out_X), np.asarray(out_y), np.asarray(X_index), np.asarray(y_index)
def clear_list_value(self, value):
    """
    Clean the argument value to eliminate None or Falsy values if needed.
    """
    if not value:
        # Nothing to clean: report the configured empty marker.
        return self.empty_value
    if self.clean_empty:
        # Drop falsy items (None, '', 0, ...) when cleaning is enabled.
        value = [item for item in value if item]
    return value if value else self.empty_value
Clean the argument value to eliminate None or Falsy values if needed.
Below is the the instruction that describes the task: ### Input: Clean the argument value to eliminate None or Falsy values if needed. ### Response: def clear_list_value(self, value): """ Clean the argument value to eliminate None or Falsy values if needed. """ # Don't go any further: this value is empty. if not value: return self.empty_value # Clean empty items if wanted if self.clean_empty: value = [v for v in value if v] return value or self.empty_value
def module2uri(self, module_name):
    """Convert an encoded module name back to an unencoded source uri."""
    assert module_name.startswith(self.module_prefix), 'incompatible module name'
    encoded = module_name[len(self.module_prefix):]
    # Undo the uri -> module-name escaping of dots and path separators.
    return encoded.replace('&#DOT', '.').replace('&#SEP', os.sep)
Convert an encoded module name to an unencoded source uri
Below is the the instruction that describes the task: ### Input: Convert an encoded module name to an unencoded source uri ### Response: def module2uri(self, module_name): """Convert an encoded module name to an unencoded source uri""" assert module_name.startswith(self.module_prefix), 'incompatible module name' path = module_name[len(self.module_prefix):] path = path.replace('&#DOT', '.') return path.replace('&#SEP', os.sep)
def extract_all_ss_dssp(in_dssp, path=True):
    """Extract per-residue secondary structure information from DSSP output.

    Parameters
    ----------
    in_dssp : str
        Path to a DSSP file, or the DSSP output text itself (see `path`).
    path : bool, optional
        If True, `in_dssp` is a file path; otherwise it is the DSSP text.

    Returns
    -------
    dssp_residues : [tuple]
        One tuple per parsed residue:
        (residue number, secondary structure type, chain identifier,
         residue type, phi torsion angle, psi torsion angle,
         dssp solvent accessibility).
    """
    if path:
        with open(in_dssp, 'r') as inf:
            dssp_out = inf.read()
    else:
        dssp_out = in_dssp[:]

    dssp_residues = []
    in_data_section = False
    for line in dssp_out.splitlines():
        if not in_data_section:
            # The residue table begins after the header row whose third
            # character is '#'.
            if line[2] == '#':
                in_data_section = True
            continue
        try:
            dssp_residues.append((
                int(line[5:10].strip()),       # residue number
                line[16],                      # secondary structure type
                line[10:12].strip(),           # chain identifier
                line[13],                      # residue type
                float(line[103:109].strip()),  # phi
                float(line[109:116].strip()),  # psi
                int(line[35:38].strip()),      # solvent accessibility
            ))
        except ValueError:
            # Chain breaks / malformed rows have blank numeric fields.
            pass
    return dssp_residues
Uses DSSP to extract secondary structure information on every residue. Parameters ---------- in_dssp : str Path to DSSP file. path : bool, optional Indicates if pdb is a path or a string. Returns ------- dssp_residues : [tuple] Each internal list contains: [0] int Residue number [1] str Secondary structure type [2] str Chain identifier [3] str Residue type [4] float Phi torsion angle [5] float Psi torsion angle [6] int dssp solvent accessibility
Below is the the instruction that describes the task: ### Input: Uses DSSP to extract secondary structure information on every residue. Parameters ---------- in_dssp : str Path to DSSP file. path : bool, optional Indicates if pdb is a path or a string. Returns ------- dssp_residues : [tuple] Each internal list contains: [0] int Residue number [1] str Secondary structure type [2] str Chain identifier [3] str Residue type [4] float Phi torsion angle [5] float Psi torsion angle [6] int dssp solvent accessibility ### Response: def extract_all_ss_dssp(in_dssp, path=True): """Uses DSSP to extract secondary structure information on every residue. Parameters ---------- in_dssp : str Path to DSSP file. path : bool, optional Indicates if pdb is a path or a string. Returns ------- dssp_residues : [tuple] Each internal list contains: [0] int Residue number [1] str Secondary structure type [2] str Chain identifier [3] str Residue type [4] float Phi torsion angle [5] float Psi torsion angle [6] int dssp solvent accessibility """ if path: with open(in_dssp, 'r') as inf: dssp_out = inf.read() else: dssp_out = in_dssp[:] dssp_residues = [] active = False for line in dssp_out.splitlines(): if active: try: res_num = int(line[5:10].strip()) chain = line[10:12].strip() residue = line[13] ss_type = line[16] phi = float(line[103:109].strip()) psi = float(line[109:116].strip()) acc = int(line[35:38].strip()) dssp_residues.append( (res_num, ss_type, chain, residue, phi, psi, acc)) except ValueError: pass else: if line[2] == '#': active = True return dssp_residues
def disconnect(self, slot): """ Disconnects the slot from the signal """ if not callable(slot): return if inspect.ismethod(slot): # If it's a method, then find it by its instance slotSelf = slot.__self__ for s in self._slots: if isinstance(s, weakref.WeakKeyDictionary) and (slotSelf in s) and (s[slotSelf] is slot.__func__): self._slots.remove(s) break elif isinstance(slot, partial) or '<' in slot.__name__: # If it's a partial or lambda, try to remove directly try: self._slots.remove(slot) except ValueError: pass else: # It's probably a function, so try to remove by weakref try: self._slots.remove(weakref.ref(slot)) except ValueError: pass
Disconnects the slot from the signal
Below is the the instruction that describes the task: ### Input: Disconnects the slot from the signal ### Response: def disconnect(self, slot): """ Disconnects the slot from the signal """ if not callable(slot): return if inspect.ismethod(slot): # If it's a method, then find it by its instance slotSelf = slot.__self__ for s in self._slots: if isinstance(s, weakref.WeakKeyDictionary) and (slotSelf in s) and (s[slotSelf] is slot.__func__): self._slots.remove(s) break elif isinstance(slot, partial) or '<' in slot.__name__: # If it's a partial or lambda, try to remove directly try: self._slots.remove(slot) except ValueError: pass else: # It's probably a function, so try to remove by weakref try: self._slots.remove(weakref.ref(slot)) except ValueError: pass
def convert(self, path, version, target): """Converts the specified source file to a new version number.""" source = self.comparer.get_representation(path) lines = [ '# <fortpy version="{}"></fortpy>\n'.format(version) ] for line in self.comparer.template.contents[version].preamble: lines.append(line.write(source.preamble, source.version, source.stored) + "\n") for line in self.comparer.template.contents[version].body: for valueset in source.body: lines.append(line.write(valueset, source.version, source.stored) + "\n") with open(os.path.expanduser(target), 'w') as f: f.writelines(lines)
Converts the specified source file to a new version number.
Below is the the instruction that describes the task: ### Input: Converts the specified source file to a new version number. ### Response: def convert(self, path, version, target): """Converts the specified source file to a new version number.""" source = self.comparer.get_representation(path) lines = [ '# <fortpy version="{}"></fortpy>\n'.format(version) ] for line in self.comparer.template.contents[version].preamble: lines.append(line.write(source.preamble, source.version, source.stored) + "\n") for line in self.comparer.template.contents[version].body: for valueset in source.body: lines.append(line.write(valueset, source.version, source.stored) + "\n") with open(os.path.expanduser(target), 'w') as f: f.writelines(lines)
def parse_remote(cls, filename): """Parses a remote filename into bucket and key information. Handles S3 with optional region name specified in key: BUCKETNAME@REGIONNAME/KEY """ parts = filename.split("//")[-1].split("/", 1) bucket, key = parts if len(parts) == 2 else (parts[0], None) if bucket.find("@") > 0: bucket, region = bucket.split("@") else: region = None return cls._REMOTE_FILE("s3", bucket, key, region)
Parses a remote filename into bucket and key information. Handles S3 with optional region name specified in key: BUCKETNAME@REGIONNAME/KEY
Below is the the instruction that describes the task: ### Input: Parses a remote filename into bucket and key information. Handles S3 with optional region name specified in key: BUCKETNAME@REGIONNAME/KEY ### Response: def parse_remote(cls, filename): """Parses a remote filename into bucket and key information. Handles S3 with optional region name specified in key: BUCKETNAME@REGIONNAME/KEY """ parts = filename.split("//")[-1].split("/", 1) bucket, key = parts if len(parts) == 2 else (parts[0], None) if bucket.find("@") > 0: bucket, region = bucket.split("@") else: region = None return cls._REMOTE_FILE("s3", bucket, key, region)
def synced(func): ''' Decorator for functions that should be called synchronously from another thread :param func: function to call ''' def wrapper(self, *args, **kwargs): ''' Actual wrapper for the synchronous function ''' task = DataManagerTask(func, *args, **kwargs) self.submit_task(task) return task.get_results() return wrapper
Decorator for functions that should be called synchronously from another thread :param func: function to call
Below is the the instruction that describes the task: ### Input: Decorator for functions that should be called synchronously from another thread :param func: function to call ### Response: def synced(func): ''' Decorator for functions that should be called synchronously from another thread :param func: function to call ''' def wrapper(self, *args, **kwargs): ''' Actual wrapper for the synchronous function ''' task = DataManagerTask(func, *args, **kwargs) self.submit_task(task) return task.get_results() return wrapper
def GetAnalysisStatusUpdateCallback(self): """Retrieves the analysis status update callback function. Returns: function: status update callback function or None if not available. """ if self._mode == self.MODE_LINEAR: return self._PrintAnalysisStatusUpdateLinear if self._mode == self.MODE_WINDOW: return self._PrintAnalysisStatusUpdateWindow return None
Retrieves the analysis status update callback function. Returns: function: status update callback function or None if not available.
Below is the the instruction that describes the task: ### Input: Retrieves the analysis status update callback function. Returns: function: status update callback function or None if not available. ### Response: def GetAnalysisStatusUpdateCallback(self): """Retrieves the analysis status update callback function. Returns: function: status update callback function or None if not available. """ if self._mode == self.MODE_LINEAR: return self._PrintAnalysisStatusUpdateLinear if self._mode == self.MODE_WINDOW: return self._PrintAnalysisStatusUpdateWindow return None
def assign_edge_colors_and_widths(self): """ Resolve conflict of 'node_color' and 'node_style['fill'] args which are redundant. Default is node_style.fill unless user entered node_color. To enter multiple colors user must use node_color not style fill. Either way, we build a list of colors to pass to Drawing.node_colors which is then written to the marker as a fill CSS attribute. """ # node_color overrides fill. Tricky to catch cuz it can be many types. # SET edge_widths and POP edge_style.stroke-width if self.style.edge_widths is None: if not self.style.edge_style["stroke-width"]: self.style.edge_style.pop("stroke-width") self.style.edge_style.pop("stroke") self.edge_widths = [None] * self.nedges else: if isinstance(self.style.edge_style["stroke-width"], (list, tuple)): raise ToytreeError( "Use edge_widths not edge_style for multiple edge widths") # check the color width = self.style.edge_style["stroke-width"] self.style.edge_style.pop("stroke-width") self.edge_widths = [width] * self.nedges else: self.style.edge_style.pop("stroke-width") if isinstance(self.style.edge_widths, (str, int)): self.edge_widths = [int(self.style.edge_widths)] * self.nedges elif isinstance(self.style.edge_widths, (list, tuple)): if len(self.style.edge_widths) != self.nedges: raise ToytreeError("edge_widths arg is the wrong length") for cidx in range(self.nedges): self.edge_widths[cidx] = self.style.edge_widths[cidx] # SET edge_colors and POP edge_style.stroke if self.style.edge_colors is None: if self.style.edge_style["stroke"] is None: self.style.edge_style.pop("stroke") self.edge_colors = [None] * self.nedges else: if isinstance(self.style.edge_style["stroke"], (list, tuple)): raise ToytreeError( "Use edge_colors not edge_style for multiple edge colors") # check the color color = self.style.edge_style["stroke"] if isinstance(color, (np.ndarray, np.void, list, tuple)): color = toyplot.color.to_css(color) self.style.edge_style.pop("stroke") self.edge_colors = [color] * self.nedges # 
otherwise parse node_color else: self.style.edge_style.pop("stroke") if isinstance(self.style.edge_colors, (str, int)): # check the color color = self.style.edge_colors if isinstance(color, (np.ndarray, np.void, list, tuple)): color = toyplot.color.to_css(color) self.edge_colors = [color] * self.nedges elif isinstance(self.style.edge_colors, (list, tuple)): if len(self.style.edge_colors) != self.nedges: raise ToytreeError("edge_colors arg is the wrong length") for cidx in range(self.nedges): self.edge_colors[cidx] = self.style.edge_colors[cidx] # do not allow empty edge_colors or widths self.edge_colors = [i if i else "#262626" for i in self.edge_colors] self.edge_widths = [i if i else 2 for i in self.edge_widths]
Resolve conflict of 'node_color' and 'node_style['fill'] args which are redundant. Default is node_style.fill unless user entered node_color. To enter multiple colors user must use node_color not style fill. Either way, we build a list of colors to pass to Drawing.node_colors which is then written to the marker as a fill CSS attribute.
Below is the the instruction that describes the task: ### Input: Resolve conflict of 'node_color' and 'node_style['fill'] args which are redundant. Default is node_style.fill unless user entered node_color. To enter multiple colors user must use node_color not style fill. Either way, we build a list of colors to pass to Drawing.node_colors which is then written to the marker as a fill CSS attribute. ### Response: def assign_edge_colors_and_widths(self): """ Resolve conflict of 'node_color' and 'node_style['fill'] args which are redundant. Default is node_style.fill unless user entered node_color. To enter multiple colors user must use node_color not style fill. Either way, we build a list of colors to pass to Drawing.node_colors which is then written to the marker as a fill CSS attribute. """ # node_color overrides fill. Tricky to catch cuz it can be many types. # SET edge_widths and POP edge_style.stroke-width if self.style.edge_widths is None: if not self.style.edge_style["stroke-width"]: self.style.edge_style.pop("stroke-width") self.style.edge_style.pop("stroke") self.edge_widths = [None] * self.nedges else: if isinstance(self.style.edge_style["stroke-width"], (list, tuple)): raise ToytreeError( "Use edge_widths not edge_style for multiple edge widths") # check the color width = self.style.edge_style["stroke-width"] self.style.edge_style.pop("stroke-width") self.edge_widths = [width] * self.nedges else: self.style.edge_style.pop("stroke-width") if isinstance(self.style.edge_widths, (str, int)): self.edge_widths = [int(self.style.edge_widths)] * self.nedges elif isinstance(self.style.edge_widths, (list, tuple)): if len(self.style.edge_widths) != self.nedges: raise ToytreeError("edge_widths arg is the wrong length") for cidx in range(self.nedges): self.edge_widths[cidx] = self.style.edge_widths[cidx] # SET edge_colors and POP edge_style.stroke if self.style.edge_colors is None: if self.style.edge_style["stroke"] is None: self.style.edge_style.pop("stroke") 
self.edge_colors = [None] * self.nedges else: if isinstance(self.style.edge_style["stroke"], (list, tuple)): raise ToytreeError( "Use edge_colors not edge_style for multiple edge colors") # check the color color = self.style.edge_style["stroke"] if isinstance(color, (np.ndarray, np.void, list, tuple)): color = toyplot.color.to_css(color) self.style.edge_style.pop("stroke") self.edge_colors = [color] * self.nedges # otherwise parse node_color else: self.style.edge_style.pop("stroke") if isinstance(self.style.edge_colors, (str, int)): # check the color color = self.style.edge_colors if isinstance(color, (np.ndarray, np.void, list, tuple)): color = toyplot.color.to_css(color) self.edge_colors = [color] * self.nedges elif isinstance(self.style.edge_colors, (list, tuple)): if len(self.style.edge_colors) != self.nedges: raise ToytreeError("edge_colors arg is the wrong length") for cidx in range(self.nedges): self.edge_colors[cidx] = self.style.edge_colors[cidx] # do not allow empty edge_colors or widths self.edge_colors = [i if i else "#262626" for i in self.edge_colors] self.edge_widths = [i if i else 2 for i in self.edge_widths]