code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
import IPy

from .Errors import ConfigError
from .Helpers import BasicConfigElement
from .Logging import logger


class ExpResCriterion_DNSRecord(BasicConfigElement):
    """DNS record

    Test properties which are common to all DNS record types.

    `type`: record's type. Must be one of the DNS record types
    implemented and described below.

    `name` (optional): list of expected names.

    `ttl_min` (optional): minimum TTL that is expected for the record.

    `ttl_max` (optional): maximum TTL that is expected for the record.

    `class` (optional): expected class for the record.

    Match when all the defined criteria are met:

    - record name must be within the list of given names (`name`);

    - record TTL must be >= `ttl_min` and <= `ttl_max`;

    - record class must be equal to `class`.

    On the basis of record's `type`, further parameters may be needed.

    Example:

    dns_answers:
        answers:
            - type: A
              name: www.ripe.net.
              address: 193.0.6.139
            - type: AAAA
              name:
              - www.ripe.net.
              - ripe.net.
              ttl_min: 604800
              address: 2001:67c:2e8:22::c100:0/64
    """

    RECORD_TYPE = ""

    MANDATORY_CFG_FIELDS = ["type"]

    OPTIONAL_CFG_FIELDS = ["name", "ttl_min", "ttl_max", "class"]

    @classmethod
    def get_cfg_fields(cls):
        # Merge the common record fields defined here with those
        # declared by the concrete record-type subclass.
        m = set(ExpResCriterion_DNSRecord.MANDATORY_CFG_FIELDS)
        o = set(ExpResCriterion_DNSRecord.OPTIONAL_CFG_FIELDS)

        m.update(cls.MANDATORY_CFG_FIELDS)
        o.update(cls.OPTIONAL_CFG_FIELDS)

        return m, o

    def __init__(self, cfg):
        BasicConfigElement.__init__(self, cfg)

        self.normalize_fields()

        # Record-type independent expectations.
        self.type = self._enforce_param("type", str)
        self.name = self._enforce_list("name", str)
        self.ttl_min = self._enforce_param("ttl_min", int)
        self.ttl_max = self._enforce_param("ttl_max", int)
        # "class" is a Python keyword, so the value is stored as klass.
        self.klass = self._enforce_param("class", str)

    def __str__(self):
        # Human readable, comma-separated description of the criteria;
        # record-type subclasses append their own details to it.
        r = "{}: ".format(self.RECORD_TYPE)
        if self.name:
            if len(self.name) > 1:
                r += "name in {}, "
            else:
                r += "name {}, "
            r = r.format(", ".join(self.name))
        if self.ttl_min:
            # Bug fix: ttl_min is a lower bound (see record_base_matches)
            # but was previously described as "ttl < {}".
            r += "ttl >= {}, ".format(self.ttl_min)
        if self.ttl_max:
            # Bug fix: ttl_max is an upper bound but was previously
            # described as "ttl > {}".
            r += "ttl <= {}, ".format(self.ttl_max)
        if self.klass:
            r += "class {}, ".format(self.klass)
        return r

    def record_base_matches(self, record):
        """Verify the type/name/TTL/class expectations common to every
        record type; return False on the first mismatch."""
        if self.RECORD_TYPE != record.type:
            logger.debug(
                " record type {} is not {}".format(
                    record.type, self.RECORD_TYPE
                )
            )
            return False

        if self.name:
            if record.name not in self.name:
                logger.debug(
                    " record name {} is not {}".format(
                        record.name, ", ".join(self.name)
                    )
                )
                return False

        if self.ttl_min:
            if record.ttl < self.ttl_min:
                logger.debug(
                    " record TTL {} < {}".format(
                        record.ttl, self.ttl_min
                    )
                )
                return False

        if self.ttl_max:
            if record.ttl > self.ttl_max:
                logger.debug(
                    " record TTL {} > {}".format(
                        record.ttl, self.ttl_max
                    )
                )
                return False

        if self.klass:
            if record.klass != self.klass:
                # Consistency fix: leading space added so this message
                # is indented like the other debug messages above.
                logger.debug(
                    " record class {} != {}".format(
                        record.klass, self.klass
                    )
                )
                return False

        return True

    def _record_matches(self, record):
        # Record-type specific matching; implemented by subclasses.
        raise NotImplementedError()

    def record_matches(self, record):
        # A record matches when both the common criteria and the
        # type-specific ones are satisfied.
        return self.record_base_matches(record) and \
            self._record_matches(record)


class ExpResCriterion_DNSRecord_A(ExpResCriterion_DNSRecord):
    """A record

    Verify if record's type is A and if received address match the
    expectations.

    `address`: list of IPv4 addresses (or IPv4 prefixes).

    Match when record's type is A and resolved address is one of the
    given addresses (or falls within one of the given prefixes).
    """

    RECORD_TYPE = "A"

    MANDATORY_CFG_FIELDS = ["address"]

    OPTIONAL_CFG_FIELDS = []

    # Expected IP version; the AAAA subclass overrides this with 6.
    IP_VER = 4

    def __init__(self, cfg):
        ExpResCriterion_DNSRecord.__init__(self, cfg)

        self.address = []
        addresses = self._enforce_list("address", str)
        for address in addresses:
            try:
                ip = IPy.IP(address)
            except Exception:
                # Bug fix: was a bare "except:", which would also
                # swallow SystemExit/KeyboardInterrupt.
                raise ConfigError(
                    "Invalid IP for {} record: {}".format(
                        self.RECORD_TYPE, address
                    )
                )
            if ip.version() != self.IP_VER:
                raise ConfigError(
                    "Invalid IP version ({}) for record type {}.".format(
                        ip.version(), self.RECORD_TYPE
                    )
                )
            self.address.append(ip)

    def _record_matches(self, record):
        try:
            ip = IPy.IP(record.address)
        except Exception:
            # Bug fix: was a bare "except:".
            logger.debug(
                " invalid {} record from result: {}".format(
                    self.RECORD_TYPE, record.address
                )
            )
            return False

        logger.debug(
            " verifying if {} matches {}...".format(
                str(ip), ", ".join(map(str, self.address))
            )
        )

        for address in self.address:
            if address.prefixlen() in [32, 128]:
                # Host address: exact match required.
                if address == ip:
                    return True
            else:
                # Prefix: match when the resolved IP falls within it.
                if ip in address:
                    return True
        return False

    def __str__(self):
        return ExpResCriterion_DNSRecord.__str__(self) + \
            ", ".join(map(str, self.address))


class ExpResCriterion_DNSRecord_AAAA(ExpResCriterion_DNSRecord_A):
    """AAAA record

    Verify if record's type is AAAA and if received address match the
    expectations.

    `address`: list of IPv6 addresses (or IPv6 prefixes).

    Match when record's type is AAAA and resolved address is one of the
    given addresses (or falls within one of the given prefixes).
    """

    RECORD_TYPE = "AAAA"

    IP_VER = 6


class ExpResCriterion_DNSRecord_NS(ExpResCriterion_DNSRecord):
    """NS record

    Verify if record's type is NS and if target is one of the expected
    ones.

    `target`: list of expected targets.

    Match when record's type is NS and received target is one of those
    given in `target`.
    """

    RECORD_TYPE = "NS"

    MANDATORY_CFG_FIELDS = ["target"]

    OPTIONAL_CFG_FIELDS = []

    def __init__(self, cfg):
        ExpResCriterion_DNSRecord.__init__(self, cfg)

        self.target = self._enforce_list("target", str)

    def _record_matches(self, record):
        logger.debug(
            " verifying if {} target {} in {}".format(
                self.RECORD_TYPE, record.target, ", ".join(self.target)
            )
        )
        return record.target in self.target

    def __str__(self):
        return ExpResCriterion_DNSRecord.__str__(self) + \
            ", ".join(map(str, self.target))


class ExpResCriterion_DNSRecord_CNAME(ExpResCriterion_DNSRecord_NS):
    """CNAME record

    Verify if record's type is CNAME and if target is one of the
    expected ones.

    `target`: list of expected targets.

    Match when record's type is CNAME and received target is one of
    those given in `target`.
    """

    RECORD_TYPE = "CNAME"


# Record types which can appear in a monitor's "dns_answers" sections;
# ExpResCriterion_AnswersSection dispatches on RECORD_TYPE.
HANDLED_RECORD_TYPES = [
    ExpResCriterion_DNSRecord_A,
    ExpResCriterion_DNSRecord_AAAA,
    ExpResCriterion_DNSRecord_NS,
    ExpResCriterion_DNSRecord_CNAME
]
/ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/ExpResCriteriaDNSRecords.py
0.814385
0.414721
ExpResCriteriaDNSRecords.py
pypi
from .Errors import ConfigError
from .ExpResCriteriaBase import ExpResCriterion
from .ExpResCriteriaDNSRecords import HANDLED_RECORD_TYPES
from .Logging import logger
from .ParsedResults import ParsedResult_DNSHeader, ParsedResult_EDNS


class ExpResCriterion_DNSBased(ExpResCriterion):
    """Base class for criteria which inspect DNS responses.

    Subclasses must implement prepare_response() (parse one DNS
    response) and response_matches() (verify it against expectations).
    """

    def response_matches(self, response):
        raise NotImplementedError()

    def prepare_response(self, result, response):
        raise NotImplementedError()

    def prepare(self, result):
        for response in result.responses:
            self.prepare_response(result, response)

    def result_matches(self, result):
        """Match when at least one response with an abuf is present and
        every such response satisfies the criterion."""
        response_found = False

        for response in result.responses:
            if response.abuf:
                response_found = True
                if not self.response_matches(response):
                    return False

        if not response_found:
            logger.debug(" no response found")

        return response_found


class ExpResCriterion_DNSFlags(ExpResCriterion_DNSBased):
    """Criterion: dns_flags

    Verify if DNS responses received by a probe have the expected
    headers flags on.

    Available for: dns.

    `dns_flags`: list of expected DNS flag ("aa", "ad", "cd", "qr",
    "ra", "rd").

    Match when all the responses received by a probe have all the
    expected flags on.

    Example:

    expected_results:
        AA_and_AD:
            dns_flags:
            - aa
            - ad
    """

    CRITERION_NAME = "dns_flags"
    AVAILABLE_FOR_MSM_TYPE = ["dns"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = []

    def __init__(self, cfg, expres):
        ExpResCriterion_DNSBased.__init__(self, cfg, expres)

        # Flags are normalized to lower case; set.add() already ignores
        # duplicates, so the previous membership test was redundant.
        self.dns_flags = set()

        for flag in self._enforce_list("dns_flags", str):
            if flag.lower() not in ParsedResult_DNSHeader.DNS_HEADER_FLAGS:
                raise ConfigError("Invalid DNS flag: {}".format(flag))
            self.dns_flags.add(flag.lower())

    def __str__(self):
        return "DNS response flags: {}".format(
            ", ".join(sorted(self.dns_flags))
        )

    def prepare_response(self, result, response):
        # NOTE(review): the parsed flags are stored on self, so they
        # reflect the response processed most recently; this assumes
        # response_matches() runs right after prepare_response() for the
        # same response — confirm against the calling framework.
        res = ParsedResult_DNSHeader(self.expres.monitor, result, response)
        self.response_flags = res.flags

    def response_matches(self, response):
        response_flags = self.response_flags

        logger.debug(
            " verifying if expected flags ({}) are "
            "in the response's flags ({})...".format(
                ", ".join(self.dns_flags), ", ".join(response_flags)
            )
        )

        if not self.dns_flags.issubset(response_flags):
            return False
        return True


class ExpResCriterion_DNSRCode(ExpResCriterion_DNSBased):
    """Criterion: dns_rcode

    Verify if DNS responses received by a probe have the expected
    rcode.

    Available for: dns.

    `dns_rcode`: list of expected DNS rcodes ("NOERROR", "FORMERR",
    "SERVFAIL", "NXDOMAIN", "NOTIMP", "REFUSED", "YXDOMAIN",
    "YXRRSET", "NXRRSET", "NOTAUTH", "NOTZONE", "BADVERS").

    Match when all the responses received by a probe have one of the
    expected rcodes listed in `dns_rcode`.

    Example:

    expected_results:
        DNS_NoError_or_NXDomain:
            dns_rcode:
            - "NOERROR"
            - "NXDOMAIN"
    """

    CRITERION_NAME = "dns_rcode"
    AVAILABLE_FOR_MSM_TYPE = ["dns"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = []

    def __init__(self, cfg, expres):
        ExpResCriterion_DNSBased.__init__(self, cfg, expres)

        dns_rcode = self._enforce_list("dns_rcode", str)

        self.dns_rcode = set()
        for rcode in dns_rcode:
            if rcode not in ParsedResult_DNSHeader.DNS_RCODES:
                raise ConfigError(
                    "Invalid rcode: {}. Must be one of {}.".format(
                        rcode, ", ".join(ParsedResult_DNSHeader.DNS_RCODES)
                    )
                )
            self.dns_rcode.add(rcode)

    def __str__(self):
        return "DNS rcodes: {}".format(
            ", ".join(sorted(self.dns_rcode))
        )

    def prepare_response(self, result, response):
        # See the NOTE in ExpResCriterion_DNSFlags.prepare_response
        # about per-response state kept on self.
        res = ParsedResult_DNSHeader(self.expres.monitor, result, response)
        self.response_rcode = res.rcode

    def response_matches(self, response):
        response_rcode = self.response_rcode

        logger.debug(
            " verifying if response's rcode ({}) is one of "
            "the expected ones ({})...".format(
                response_rcode, ", ".join(self.dns_rcode)
            )
        )

        if response_rcode not in self.dns_rcode:
            return False
        return True


class ExpResCriterion_EDNS(ExpResCriterion_DNSBased):
    """Criterion: edns

    Verify EDNS extension of DNS responses received by probes.

    Available for: dns.

    `edns`: boolean indicating whether EDNS support is expected or not.

    `edns_size` (optional): minimum expected size.

    `edns_do` (optional): boolean indicating the expected presence of
    DO flag.

    `edns_nsid` (optional): list of expected NSID values.

    The optional parameters are taken into account only when `edns` is
    True.

    If `edns` is True, match when all the responses contain EDNS
    extension, otherwise when all the responses do not contain it.

    If `edns_size` is given, the size reported must be >= than the
    expected one.

    If `edns_do` is given, all the responses must have (or have not)
    the DO flag on.

    If `edns_nsid` is given, all the responses must contain an EDNS
    NSID option which falls within the list of values herein specified.

    Examples:

    edns: true

    edns: true
    edns_do: true

    edns: true
    edns_nsid:
    - "ods01.l.root-servers.org"
    - "kbp01.l.root-servers.org"
    """

    CRITERION_NAME = "edns"
    AVAILABLE_FOR_MSM_TYPE = ["dns"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = ["edns_size", "edns_do", "edns_nsid"]

    def __init__(self, cfg, expres):
        ExpResCriterion_DNSBased.__init__(self, cfg, expres)

        self.edns = self._enforce_param("edns", bool)
        self.edns_size = self._enforce_param("edns_size", int)
        self.edns_do = self._enforce_param("edns_do", bool)
        self.edns_nsid = self._enforce_list("edns_nsid", str)

    def __str__(self):
        if self.edns:
            r = "EDNS supported"
            if self.edns_size is not None:
                r += "; size >= {}".format(self.edns_size)
            if self.edns_do is not None:
                if self.edns_do:
                    r += "; DO flag on"
                else:
                    r += "; DO flag off"
            if self.edns_nsid:
                r += "; NSID in "
                r += ", ".join(self.edns_nsid)
        else:
            r = "EDNS not supported"
        return r

    def prepare_response(self, result, response):
        res = ParsedResult_EDNS(self.expres.monitor, result, response)
        self.response_edns = res.edns
        self.response_edns_size = res.edns_size
        self.response_edns_do = res.edns_do
        self.response_edns_nsid = res.edns_nsid

    def response_matches(self, response):
        # Presence/absence of EDNS support must match `edns` first; the
        # other checks apply only when EDNS is expected.
        if self.response_edns and not self.edns:
            logger.debug(
                " EDNS is supported while it shouldn't"
            )
            return False
        if not self.response_edns and self.edns:
            logger.debug(
                " EDNS is not supported while it should be"
            )
            return False

        if self.edns and self.edns_size:
            if self.response_edns_size < self.edns_size:
                logger.debug(
                    " EDNS udp size {} < {}".format(
                        self.response_edns_size, self.edns_size
                    )
                )
                return False

        if self.edns and self.edns_do is not None:
            if self.response_edns_do and not self.edns_do:
                logger.debug(
                    " EDNS DO flag is on while it should be off"
                )
                return False
            if not self.response_edns_do and self.edns_do:
                # Typo fix in the log message: "while is should be on".
                logger.debug(
                    " EDNS DO flag is off while it should be on"
                )
                return False

        if self.edns and self.edns_nsid:
            if not self.response_edns_nsid:
                logger.debug(
                    " EDNS NSID option missing"
                )
                return False
            logger.debug(
                " verifying if NSID {} is in {}...".format(
                    self.response_edns_nsid, ", ".join(self.edns_nsid)
                )
            )
            if self.response_edns_nsid not in self.edns_nsid:
                return False

        return True


class ExpResCriterion_AnswersSection(object):
    """DNS answer section

    One of "answers", "authorities", "additionals". Each section must
    contain a list of DNS records.
    """

    def __init__(self, name, cfg):
        self.name = name
        self.records = []
        for record_cfg in cfg:
            self.add_record(record_cfg)

    def add_record(self, record_cfg):
        """Instantiate the criterion class matching record_cfg's type
        and append it to self.records; raise ConfigError on unknown or
        missing types."""
        if "type" not in record_cfg:
            raise ConfigError("Missing mandatory attribute: type")

        # Dead "record_class = None" assignment removed: the loop
        # variable was never read after the loop.
        for record_class in HANDLED_RECORD_TYPES:
            if record_class.RECORD_TYPE.lower() == record_cfg["type"].lower():
                self.records.append(record_class(record_cfg))
                return

        raise ConfigError(
            "Unhandled record type: {}".format(
                record_cfg["type"]
            )
        )

    def __str__(self):
        return "Section {}: {}".format(
            self.name, ", ".join(map(str, self.records))
        )


class ExpResCriterion_DNSAnswers(ExpResCriterion_DNSBased):
    """Criterion: dns_answers

    Verify if the responses received by a probe contain the expected
    records.

    Available for: dns.

    `dns_answers`: one or more sections where records are searched on.
    Must be one of "answers", "authorities", "additionals". Each
    section must contain a list of records.

    Match when all the responses received by a probe contain at least
    one record matching the expected ones in each of the given
    sections.

    Example:

    dns_answers:
        answers:
            - <record1>
            - <record2>
        authorities:
            - <record3>
            - <record4>
    """

    CRITERION_NAME = "dns_answers"
    AVAILABLE_FOR_MSM_TYPE = ["dns"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = []

    def __init__(self, cfg, expres):
        ExpResCriterion_DNSBased.__init__(self, cfg, expres)

        self.dns_answers = self._enforce_param("dns_answers", dict)

        self.sections = []

        SECTIONS = ["answers", "authorities", "additionals"]

        for section in self.dns_answers:
            if section.lower() not in SECTIONS:
                raise ConfigError(
                    "Invalid section: {}. Must be one of {}".format(
                        section, ", ".join(SECTIONS)
                    )
                )

            # A single record given as a plain mapping is accepted and
            # normalized to a one-element list.
            if isinstance(self.dns_answers[section], list):
                section_cfg = self.dns_answers[section]
            elif isinstance(self.dns_answers[section], dict):
                section_cfg = [self.dns_answers[section]]
            else:
                raise ConfigError(
                    "Invalid section {}".format(section)
                )

            self.sections.append(
                ExpResCriterion_AnswersSection(
                    section, section_cfg
                )
            )

    def __str__(self):
        r = ""
        for section in self.sections:
            if r != "":
                r += "; "
            r += "{} section: {}".format(
                section.name, ", ".join(map(str, section.records))
            )
        return r

    def prepare_response(self, result, response):
        # This class doesn't behave like others and doesn't use
        # the prepare() method to parse results.
        # It implements checks directly in the response_matches()
        # method, that verifies every record directly.
        pass

    def response_matches(self, response):
        """Match when each configured section contains at least one
        record satisfying one of its expected records."""
        for section in self.sections:
            answer_section = getattr(response.abuf, section.name)

            if len(answer_section) == 0:
                logger.debug(
                    " section {} not found in the response".format(
                        section.name
                    )
                )
                return False

            # Bug fix: previously a match anywhere made the whole
            # criterion pass immediately, so sections after the first
            # matching record were never verified, contradicting the
            # documented "in each of the given sections" semantics.
            section_matched = any(
                record.record_matches(answer_record)
                for record in section.records
                for answer_record in answer_section
            )
            if not section_matched:
                return False

        return True
/ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/ExpResCriteriaDNS.py
0.845081
0.286381
ExpResCriteriaDNS.py
pypi
from .Errors import ConfigError
from .Helpers import BasicConfigElement


class ExpResCriterion(BasicConfigElement):
    """ExpResCriterion

    Base class for every expected-result criterion. It reads the
    criterion's attributes from the monitor's config file, validates
    them and uses them to match received results against expected
    values.

    Class-level contract for subclasses:

    - CRITERION_NAME: the main attribute this criterion is based on.

    - AVAILABLE_FOR_MSM_TYPE: measurement types ("ping", "traceroute",
      ...) this criterion can be used with.

    - OPTIONAL_CFG_FIELDS / MANDATORY_CFG_FIELDS: optional and
      mandatory config attributes used in the matching process; the
      CRITERION_NAME is implicitly part of the mandatory ones.

    Subclasses read and validate each attribute in __init__(), parse
    received results in prepare() (called before result_matches()) and
    decide whether a result meets the expectations in result_matches().
    The __str__() and display_string() methods provide a brief and a
    detailed textual description of the criterion respectively.

    Keep this docstring in sync with docs/CONTRIBUTING.rst file.
    """

    CRITERION_NAME = None
    AVAILABLE_FOR_MSM_TYPE = []
    OPTIONAL_CFG_FIELDS = []
    MANDATORY_CFG_FIELDS = []

    @classmethod
    def get_cfg_fields(cls):
        # The criterion's own name is always a mandatory config field.
        mandatory = set(cls.MANDATORY_CFG_FIELDS)
        optional = set(cls.OPTIONAL_CFG_FIELDS)

        if cls.CRITERION_NAME:
            mandatory.add(cls.CRITERION_NAME)

        return mandatory, optional

    def __init__(self, cfg, expres):
        # Guard clauses: a concrete criterion must declare its name and
        # at least one measurement type it supports.
        if self.CRITERION_NAME is None:
            raise NotImplementedError()
        if not self.AVAILABLE_FOR_MSM_TYPE:
            raise NotImplementedError()

        BasicConfigElement.__init__(self, cfg)

        self.monitor = expres.monitor
        self.expres = expres

        if self.monitor.msm_type not in self.AVAILABLE_FOR_MSM_TYPE:
            raise ConfigError(
                "Can't use {} for this measurement; "
                "it is available only on {}.".format(
                    self.CRITERION_NAME,
                    ", ".join(self.AVAILABLE_FOR_MSM_TYPE)
                )
            )

    def prepare(self, result):  # pragma: no cover
        # Called before result_matches(). Subclasses can use it to
        # parse results once and cache the derived data structures.
        # For example, the TracerouteBased criteria build the AS path
        # only once per result/probe and store it in the parsed_res
        # cache via set_parsed_res; subsequent criteria find the path
        # already built and reuse it.
        raise NotImplementedError()

    def result_matches(self, result):  # pragma: no cover
        raise NotImplementedError()

    def __str__(self):  # pragma: no cover
        raise NotImplementedError()

    def _str_list(self):
        # Render the criterion's main attribute as a comma-separated
        # list; EAFP variant of the original hasattr() test.
        try:
            values = getattr(self, self.CRITERION_NAME)
        except AttributeError:
            raise NotImplementedError()  # pragma: no cover
        return ", ".join(map(str, values))

    def display_string(self):  # pragma: no cover
        return " - {}".format(str(self))
/ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/ExpResCriteriaBase.py
0.726037
0.461623
ExpResCriteriaBase.py
pypi
import IPy

from .Errors import ConfigError, ResultProcessingError
from .ExpResCriteriaBase import ExpResCriterion
from .Logging import logger
from .ParsedResults import ParsedResult_RTT, ParsedResult_DstResponded, \
    ParsedResult_DstIP


class ExpResCriterion_RTT(ExpResCriterion):
    """Criterion: rtt

    Test the median round trip time toward destination.

    Available for: ping, traceroute.

    `rtt`: maximum RTT (in ms).

    `rtt_tolerance` (optional): tolerance (in %) on `rtt`.

    If `rtt_tolerance` is not given, match when measured RTT is less
    than `rtt`, otherwise match when measured RTT is within `rtt` +/-
    `rtt_tolerance` %.

    Examples:

    expected_results:
        LowRTT:
            rtt: 50
        Near150:
            rtt: 150
            rtt_tolerance: 30
    """

    CRITERION_NAME = "rtt"
    AVAILABLE_FOR_MSM_TYPE = ["traceroute", "ping"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = ["rtt_tolerance"]

    def __init__(self, cfg, expres):
        ExpResCriterion.__init__(self, cfg, expres)

        self.rtt = self._enforce_param("rtt", int)
        # Default the tolerance to 0 so it can be used in arithmetic
        # and truth tests without a None check.
        self.rtt_tolerance = self._enforce_param("rtt_tolerance", int) or 0

    def __str__(self):
        # Bug fix: the original returned None (implicitly) when
        # self.rtt was falsy; __str__ must always return a string.
        if self.rtt_tolerance:
            return "RTT must be within {}ms +/- {}%".format(
                self.rtt, self.rtt_tolerance
            )
        return "RTT must be less than {}ms".format(
            self.rtt
        )

    def prepare(self, result):
        res = ParsedResult_RTT(self.expres.monitor, result)
        self.res_rtt = res.rtt

    def result_matches(self, result):
        result_rtt = self.res_rtt

        if result_rtt:
            if not self.rtt_tolerance:
                # Plain upper bound.
                logger.debug(
                    " verifying if RTT {} < {}...".format(
                        result_rtt, self.rtt
                    )
                )
                if result_rtt > self.rtt:
                    return False
            else:
                # Band check: |rtt - measured| <= rtt * tolerance%.
                logger.debug(
                    " verifying if RTT {} within {} +/- {}%...".format(
                        result_rtt, self.rtt, self.rtt_tolerance
                    )
                )
                delta = self.rtt * self.rtt_tolerance / 100
                if abs(self.rtt - result_rtt) > delta:
                    return False
        else:
            raise ResultProcessingError(
                "Can't verify RTT: RTT is unknown"
            )
        return True


class ExpResCriterion_DstResponded(ExpResCriterion):
    """Criterion: dst_responded

    Verify if destination responded.

    Available for: traceroute, ping, sslcert.

    `dst_responded`: boolean indicating if the destination is expected
    to be responding or not.

    For ping, a destination is responding if a probe received at least
    one reply packet.
    For sslcert, a destination is responding if at least one
    certificate is received by the probe.

    If `dst_responded` is True, match when a destination is responding.
    If `dst_responded` is False, match when a destination is not
    responding.

    Example:

    expected_results:
        DestinationReachable:
            dst_responded: True
    """

    CRITERION_NAME = "dst_responded"
    AVAILABLE_FOR_MSM_TYPE = ["traceroute", "ping", "sslcert"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = []

    def __init__(self, cfg, expres):
        ExpResCriterion.__init__(self, cfg, expres)

        self.dst_responded = self._enforce_param("dst_responded", bool)

    def __str__(self):
        if self.dst_responded:
            return "Destination must respond"
        else:
            return "Destination must not respond"

    def prepare(self, result):
        res = ParsedResult_DstResponded(self.expres.monitor, result)
        self.res_responded = res.responded

    def result_matches(self, result):
        result_responded = self.res_responded

        logger.debug(
            " testing if target responded ({}) is {}...".format(
                result_responded, self.dst_responded
            )
        )

        if result_responded and not self.dst_responded:
            logger.debug(" target responded while it should not")
            return False
        if not result_responded and self.dst_responded:
            logger.debug(" target did not respond")
            return False
        return True


class ExpResCriterion_DstIP(ExpResCriterion):
    """Criterion: dst_ip

    Verify that the destination IP used by the probe for the
    measurement is the expected one.

    Available for: traceroute, ping, sslcert.

    `dst_ip`: list of expected IP addresses (or prefixes).

    Match when the probe destination IP is one of the expected ones
    (or falls within one of the expected prefixes).

    Examples:

    dst_ip: 192.168.0.1

    dst_ip:
    - 192.168.0.1
    - 2001:DB8::1

    dst_ip:
    - 192.168.0.1
    - 10.0.0.0/8
    - 2001:DB8::/32
    """

    CRITERION_NAME = "dst_ip"
    AVAILABLE_FOR_MSM_TYPE = ["traceroute", "ping", "sslcert"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = []

    def __init__(self, cfg, expres):
        ExpResCriterion.__init__(self, cfg, expres)

        self.dst_ip = []
        dst_ip = self._enforce_list("dst_ip", str)
        for ip in dst_ip:
            try:
                self.dst_ip.append(IPy.IP(ip))
            except Exception:
                # Bug fix: was a bare "except:".
                raise ConfigError("Invalid IP address/net: {}".format(ip))

    def __str__(self):
        # Pick a wording which matches the kind of expectations given:
        # only prefixes, multiple values or a single exact address.
        all_subnets = True
        for ip in self.dst_ip:
            all_subnets = all_subnets and ip.prefixlen() not in [32, 128]

        more_than_one = len(self.dst_ip) > 1

        if all_subnets:
            tpl = "Destination IP must fall into {}"
        else:
            if more_than_one:
                tpl = "Destination IP must be in {}"
            else:
                tpl = "Destination IP must be {}"

        return tpl.format(self._str_list())

    def prepare(self, result):
        res = ParsedResult_DstIP(self.expres.monitor, result)
        self.res_dst_ip = res.dst_ip

    def result_matches(self, result):
        result_dst_ip = self.res_dst_ip

        try:
            result_dst_ip = IPy.IP(result_dst_ip)
        except Exception:
            # Bug fix: was a bare "except:".
            raise ResultProcessingError(
                "Invalid destination IP address: {}".format(result_dst_ip)
            )

        match = False
        for dst_ip in self.dst_ip:
            if dst_ip.prefixlen() not in [32, 128]:
                logger.debug(
                    " verifying if destination IP {} falls into "
                    "the expected subnet ({})...".format(
                        result_dst_ip, dst_ip
                    )
                )
                match = result_dst_ip in dst_ip
            else:
                # Bug fix: the log previously formatted the whole
                # self.dst_ip list instead of the candidate dst_ip.
                logger.debug(
                    " verifying if destination IP {} matches "
                    "the expected one ({})".format(
                        result_dst_ip, dst_ip
                    )
                )
                match = result_dst_ip == dst_ip
            if match:
                break

        return match
/ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/ExpResCriteriaCommon.py
0.806129
0.373876
ExpResCriteriaCommon.py
pypi
import datetime
import json
import os
from six.moves.queue import Queue, Empty
import time
from threading import Thread

import pytz
import yaml

from .Action import ACTION_CLASSES
from .Config import Config
from .Errors import ConfigError, MissingFileError, \
    MeasurementProcessingError, \
    LockError, ProgramError
from .ExpectedResult import ExpectedResult
from .Helpers import BasicConfigElement, LockFile, ProbesFilter
from .Logging import logger
from .MsmProcessingUnit import MsmProcessingUnit
from ripe.atlas.cousteau import AtlasStream
from ripe.atlas.sagan import Result
from .Rule import Rule


class MonitorResultsThread(Thread):
    """Consumer thread: drains the monitor's results queue and feeds
    each streamed result into Monitor.process_results()."""

    def __init__(self, monitor, probes_filter):
        Thread.__init__(self)
        self.monitor = monitor
        self.probes_filter = probes_filter

    def run(self):
        # Poll with a 1 second timeout so the exit_thread flag is
        # re-checked regularly and the thread can terminate promptly.
        while not self.monitor.exit_thread:
            try:
                result = self.monitor.results_queue.get(True, 1)
                self.monitor.process_results([result], self.probes_filter)
            except Empty:
                pass


class Monitor(BasicConfigElement, MsmProcessingUnit):
    """Monitor

    A monitor allows to process results from a measurement.

    `descr` (optional): monitor's brief description.

    `measurement-id` (optional): measurement ID used to gather results.
    It can be given (and/or overwritten) via command line argument
    `--measurement-id`.

    `matching_rules`: list of rules to match probes against. When a
    probe matches one of these rules, its expected results are
    processed and its actions are performed.

    `expected_results` (optional): list of expected results. Probe's
    expected results contain references to this list.

    `actions` (optional): list of actions to be executed on the basis
    of probe's expected results.

    `stream` (optional): boolean indicating if results streaming must
    be used. It can be given (and/or overwritten) via command line
    argument `--stream`.

    `stream_timeout` (optional): how long to wait (in seconds) before
    stopping a streaming monitor if no results are received on the
    stream.

    `key` (optional): RIPE Atlas key to access the measurement.
    It can be given (and/or overwritten) via command line argument
    `--key`.

    `key_file` (optional): a file containing the RIPE Atlas key to
    access the measurement. The file must contain only the RIPE Atlas
    key, in plain text. If `key` is given, this field is ignored.
    """

    MANDATORY_CFG_FIELDS = ["matching_rules"]

    OPTIONAL_CFG_FIELDS = ["measurement-id", "stream", "stream_timeout",
                           "actions", "key", "key_file", "descr",
                           "expected_results"]

    def _get_statusfile_path(self):
        # Status is persisted only for named (file-based) monitors.
        if self.monitor_name:
            return "{}/status/{}.{}.json".format(
                Config.get("var_dir"), self.monitor_name, self.msm_id
            )
        return None

    def _get_lockfile_path(self):
        # A lock is used only for named (file-based) monitors.
        if self.monitor_name:
            return "{}/locks/{}.{}.lock".format(
                Config.get("var_dir"), self.monitor_name, self.msm_id
            )
        return None

    def _load_from_file(self, monitor_name):
        """Load and parse the monitor's YAML configuration file."""

        file_path = "{}/monitors/{}.yaml".format(
            Config.get("var_dir"), monitor_name
        )
        if not os.path.isfile(file_path):
            raise MissingFileError(path=file_path)
        try:
            with open(file_path, "r") as monitor_file:
                try:
                    # safe_load: monitor configs are plain data; the
                    # full yaml.load() can instantiate arbitrary Python
                    # objects and must not be used on config files
                    return yaml.safe_load(monitor_file.read())
                except yaml.parser.ParserError as e:
                    raise ConfigError(
                        "Error in YAML syntax: {}".format(str(e))
                    )
        except Exception as e:
            raise ConfigError(
                "Can't read from the monitor configuration file {}: {}".format(
                    file_path, str(e)
                )
            )

    def __init__(self, cfg_or_name, ip_cache=None, msm_id=None, key=None):
        # cfg_or_name may be the name of a monitor (str, loaded from
        # <var_dir>/monitors/<name>.yaml) or an already-parsed config dict.
        if isinstance(cfg_or_name, str):
            self.monitor_name = cfg_or_name
            cfg = self._load_from_file(self.monitor_name)
        elif isinstance(cfg_or_name, dict):
            self.monitor_name = None
            cfg = cfg_or_name
        else:
            raise ConfigError(
                "Invalid monitor name or configuration type: {}".format(
                    type(cfg_or_name)
                )
            )

        BasicConfigElement.__init__(self, cfg)

        self.normalize_fields()

        self.descr = self._enforce_param("descr", str)

        # Type validation only; the values are read later from self.cfg.
        self._enforce_param("matching_rules", list)
        self._enforce_param("expected_results", dict)
        self._enforce_param("actions", dict)

        MsmProcessingUnit.__init__(
            self,
            ip_cache=ip_cache,
            msm_id=msm_id or self._enforce_param("measurement-id", int),
            key=key or self._enforce_param("key", str),
            key_file=self._enforce_param("key_file", str)
        )

        self.stream = self._enforce_param("stream", bool) or False
        self.stream_timeout = self._enforce_param("stream_timeout", int)

        if self.stream:
            self.ensure_streaming_enabled(ConfigError)

        # Expected results normalization
        self.expected_results = {}
        if self.cfg["expected_results"]:
            for expres_name in self.cfg["expected_results"]:
                expres_cfg = self.cfg["expected_results"][expres_name]
                try:
                    expres = ExpectedResult(self, expres_name, expres_cfg)
                    self.expected_results[expres_name] = expres
                except ConfigError as e:
                    raise ConfigError(
                        "Syntax error for expected result {} - {}".format(
                            expres_name, str(e)
                        )
                    )

        # Actions normalization
        self.actions = {}
        if self.cfg["actions"]:
            for action_name in self.cfg["actions"]:
                action_cfg = self.cfg["actions"][action_name]
                try:
                    if "kind" in action_cfg:
                        action_kind = action_cfg["kind"]
                        action = None
                        # Dispatch on the action class whose kind matches.
                        for action_class in ACTION_CLASSES:
                            if action_class.CFG_ACTION_KIND == action_kind:
                                action = action_class(self, action_name,
                                                      action_cfg)
                                break
                        if action is None:
                            raise ConfigError(
                                "Unknown action kind: {}".format(
                                    action_kind
                                )
                            )
                        self.actions[action_name] = action
                    else:
                        raise ConfigError("Missing action kind")
                except ConfigError as e:
                    raise ConfigError(
                        "Syntax error for action {} - {}".format(
                            action_name, str(e)
                        )
                    )

        # Rules normalization; every expected result / action referenced
        # by a rule must have been defined above.
        self.rules = []
        rule_n = 0
        for rule_cfg in self.cfg["matching_rules"]:
            rule_n += 1
            try:
                rule = Rule(self, rule_cfg)
                self.rules.append(rule)

                if rule.expected_results and rule.expected_results != []:
                    for expres_name in rule.expected_results:
                        if expres_name not in self.expected_results:
                            raise ConfigError(
                                "Expected result not found: "
                                "{}".format(expres_name)
                            )

                if rule.actions and rule.actions != []:
                    for action_name in rule.actions:
                        if action_name not in self.actions:
                            raise ConfigError(
                                "Action not found: "
                                "{}".format(action_name)
                            )
            except ConfigError as e:
                raise ConfigError(
                    "Syntax error for rule n. {} - {}".format(
                        rule_n, str(e)
                    )
                )

        self.internal_labels = {
            "probes": {},
            "results": {}
        }

        self.exit_thread = False
        self.results_queue = None

        self.lock_fd = None
        self.lock_file = LockFile()

        self.status = {}
        self.load_status()

        # Reference epoch for converting result timestamps to seconds.
        self._epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)

    def load_status(self):
        """Load the persisted monitor status (if any) from disk."""

        status_filepath = self._get_statusfile_path()
        if status_filepath:
            if os.path.isfile(status_filepath):
                try:
                    with open(status_filepath, "r") as f:
                        self.status = json.loads(f.read())
                except Exception:
                    # was a bare "except:"; narrowed to Exception
                    raise ProgramError(
                        "Can't read status from {}".format(status_filepath)
                    )

    def write_status(self):
        """Persist the monitor status to disk (atomic via tmp+rename)."""

        status_filepath = self._get_statusfile_path()
        if status_filepath:
            try:
                with open("{}.tmp".format(status_filepath), "w") as f:
                    f.write(json.dumps(self.status))
                os.rename("{}.tmp".format(status_filepath), status_filepath)
            except Exception:
                # was a bare "except:"; narrowed to Exception
                raise ProgramError(
                    "Can't write status to {}".format(status_filepath)
                )

    def __str__(self):
        if self.descr:
            return "monitor '{}'".format(self.descr)
        else:
            if self.monitor_name:
                tpl = ("monitor '{name}' "
                       "(measurement ID {msm_id}, {msm_type}, IPv{af})")
            else:
                tpl = ("monitor for measurement ID {msm_id} "
                       "({msm_type}, IPv{af})")
            return tpl.format(
                name=self.monitor_name,
                msm_id=self.msm_id,
                msm_type=self.msm_type,
                af=self.msm_af
            )

    def display(self):
        """Print a human readable description of the monitor's config."""

        if self.msm_id:
            print("Measurement ID: {}".format(self.msm_id))
        else:
            print(
                "Measurement ID not specified, it must be given using the "
                "command line argument --measurement-id."
            )
        print("")

        if self.stream:
            if self.stream_timeout:
                print("Streaming of results enabled - "
                      "{} seconds timeout.".format(self.stream_timeout))
            else:
                print("Streaming of results enabled - no timeout.")
            print("")

        rule_n = 0
        for rule in self.rules:
            rule_n += 1
            print("Matching rule n. {}".format(rule_n))
            print("")
            rule.display()

            if len(rule.expected_results) > 0:
                print(" Expected results:")
                print("")
                for expres_name in rule.expected_results:
                    self.expected_results[expres_name].display()
            else:
                print(" No expected results for this rule.")
                print("")

            if len(rule.actions) > 0:
                print(" Actions:")
                print("")
                for action_name in rule.actions:
                    if self.actions[action_name].when == "on_match":
                        tpl = " Action fired on match: {}"
                    elif self.actions[action_name].when == "on_mismatch":
                        tpl = " Action fired on mismatch: {}"
                    else:
                        tpl = " Action always fired: {}"
                    print(tpl.format(self.actions[action_name]))
                    print("")

    def process_matching_probe(self, probe, rule_n, rule, result):
        """Verify every expected result of `rule` against `result` and
        perform the rule's actions accordingly."""

        logger.info(" probe ID {} matches".format(probe.id))

        if len(rule.expected_results) == 0:
            logger.result(
                "Rule n. {} ({}), {}, "
                "no expected result.".format(
                    rule_n, str(rule), probe
                )
            )
            rule.perform_actions(result=result)
            return

        for expres_name in rule.expected_results:
            expres = self.expected_results[expres_name]

            logger.info(
                "Verifying expected result {}: {}...".format(
                    expres_name, str(expres)
                )
            )

            result_matches = expres.result_matches(result)

            logger.result(
                "Rule n. {} ({}), {}, "
                "expected result {}: {}.".format(
                    rule_n, str(rule), probe, expres_name,
                    "OK" if result_matches else "MISMATCH"
                )
            )

            rule.perform_actions(result=result, expres=expres,
                                 result_matches=result_matches)

    def update_latest_result_ts(self, result):
        # Track the most recent result timestamp seen, so subsequent
        # runs can resume from there; never move it backwards.
        ts = (result.created - self._epoch).total_seconds()
        if "latest_result_ts" in self.status:
            if self.status["latest_result_ts"] > ts:
                return
        self.status["latest_result_ts"] = ts
        self.write_status()

    def process_results(self, results, probes_filter):
        """Process a batch of raw (JSON) results through the rules."""

        logger.info("Processing results...")

        # Be sure to have info for every probe in the resultset
        self.update_probes(results)

        if len(results) == 0:
            logger.info("No results found.")
            return

        # Processing results...
        for json_result in results:
            result = Result.get(json_result,
                                on_error=Result.ACTION_IGNORE,
                                on_malformation=Result.ACTION_IGNORE)

            probe = self.get_probe(result)
            if probe not in probes_filter:
                logger.debug(
                    " skipping {} because of probes filter".format(probe)
                )
                continue

            self.process_result(result)
            self.update_latest_result_ts(result)

        self.internal_labels["results"] = {}

        self.ip_cache.save()

    def process_result(self, result):
        """Match one parsed result against the monitor's rules."""

        self.internal_labels["results"] = {}

        # Errored/malformed results were parsed with ACTION_IGNORE;
        # skip them here.
        is_success = not result.is_error and not result.is_malformed
        if not is_success:
            return

        probe = self.get_probe(result)

        logger.info(
            "Processing result for {} at {}...".format(
                str(probe), result.created
            )
        )

        rule_n = 0
        for rule in self.rules:
            rule_n += 1

            logger.info(
                "Testing rule n. {}: {}...".format(rule_n, str(rule))
            )

            if rule.probe_matches(probe):
                self.process_matching_probe(probe, rule_n, rule, result)

                # On match, stop at this rule unless process_next is
                # explicitly True.
                if rule.process_next is True:
                    logger.info(" next rule processing forcedly enabled")
                else:
                    break
            else:
                logger.info(
                    " {} does not match".format(probe)
                )

                # On mismatch, keep going unless process_next is
                # explicitly False.
                if rule.process_next is False:
                    logger.info(" next rule processing forcedly inhibited")
                    break

    def on_result_response(self, result):
        # Callback for the Atlas stream: hand the result over to the
        # consumer thread.
        self.results_queue.put(result)

    def acquire_lock(self):
        if not self.monitor_name:
            return
        if not self.lock_file.acquire(self._get_lockfile_path()):
            raise LockError(
                "Another instance of this program is already "
                "executing this monitor."
            )

    def release_lock(self):
        self.lock_file.release()

    def ensure_streaming_enabled(self, exception_class):
        """Raise exception_class if this measurement can't be streamed."""

        err = "Can't use results streaming for this measurement: {}"

        if self.msm_is_oneoff:
            raise exception_class(
                err.format("it's a one-off measurement.")
            )
        if not self.msm_is_running:
            raise exception_class(
                err.format("it is not running anymore.")
            )
        if not self.msm_is_public:
            raise exception_class(
                err.format("it is not public.")
            )

    def run_stream(self, probes_filter):
        """Process results in real time via the Atlas result stream."""

        logger.info(" - using real-time results streaming")

        self.ensure_streaming_enabled(MeasurementProcessingError)

        try:
            atlas_stream = AtlasStream()
            atlas_stream.connect()

            atlas_stream.bind_channel("result", self.on_result_response)

            stream_params = {"msm": self.msm_id}
        except Exception as e:
            raise MeasurementProcessingError(
                "Error while creating the stream: {}".format(str(e))
            )

        self.results_queue = Queue()

        thread = MonitorResultsThread(self, probes_filter)

        try:
            thread.start()

            atlas_stream.start_stream(stream_type="result", **stream_params)
            atlas_stream.timeout(seconds=self.stream_timeout)
            atlas_stream.disconnect()
        except:  # noqa - deliberately broad: a KeyboardInterrupt must
            # also stop the stream and fall through to the cleanup below.
            try:
                atlas_stream.disconnect()
            except Exception:
                pass
        finally:
            try:
                atlas_stream.disconnect()
            except Exception:
                pass
            self.exit_thread = True
            thread.join(timeout=10)

    def get_latest_result_ts(self):
        if "latest_result_ts" in self.status:
            return self.status["latest_result_ts"]
        return None

    def run_once(self, probes_filter, start=None, stop=None,
                 latest_results=None):
        """Download one batch of results and process it."""

        fetch_only_probe_ids = probes_filter.probe_ids

        results = self.download(start=start, stop=stop,
                                latest_results=latest_results,
                                probe_ids=fetch_only_probe_ids)

        self.process_results(results, probes_filter)

    def run_continously(self, start, probes_filter):
        # NOTE(review): name kept misspelled ("continously") for
        # backward compatibility with existing callers/subclasses.
        try:
            while True:
                self.run_once(probes_filter, start=start)
                logger.info(
                    "Waiting {} seconds (measurement's interval) before "
                    "downloading new results...".format(self.msm_interval)
                )
                time.sleep(self.msm_interval)
        except KeyboardInterrupt:
            pass

    def run(self, start=None, stop=None, latest_results=None,
            dont_wait=False, probes_filter=None):
        """Entry point: stream, loop or one-shot, depending on config."""

        self.acquire_lock()

        if not probes_filter:
            probes_filter = ProbesFilter()

        logger.info("Starting {}".format(str(self)))
        try:
            if self.stream:
                self.run_stream(probes_filter)
            elif not self.msm_is_oneoff and self.msm_is_running and \
                    not latest_results and not stop and not dont_wait:
                self.run_continously(start, probes_filter)
            else:
                self.run_once(probes_filter, start=start, stop=stop,
                              latest_results=latest_results)
        finally:
            self.release_lock()
/ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/Monitor.py
0.624294
0.222014
Monitor.py
pypi
from .Errors import ConfigError, ResultProcessingError
from .ExpResCriteria import CRITERIA_CLASSES
from .Helpers import BasicConfigElement
from .Logging import logger


class ExpectedResult(BasicConfigElement):
    """Expected result

    A group of criteria used to match probes' results.

    `descr` (optional): a brief description of this group of criteria.

    Matching rules reference this on their `expected_results` list.

    When a probe matches a rule, the keys in the `expected_results` list of
    that rule are used to obtain the group of criteria to be used to
    process the result.

    Example:

    matching_rules:
    - descr: Probes from France via AS64496
      src_country: FR
      expected_results: ViaAS64496
    expected_results:
      ViaAS64496:
        upstream_as: 64496
    """

    OPTIONAL_CFG_FIELDS = ["descr"]

    MANDATORY_CFG_FIELDS = []

    @classmethod
    def get_cfg_fields(cls):
        # The set of allowed config fields is the union of this class'
        # own fields and every field accepted by any criterion class.
        m = set(cls.MANDATORY_CFG_FIELDS)
        o = set(cls.OPTIONAL_CFG_FIELDS)

        for criterion_class in CRITERIA_CLASSES:
            o.update(criterion_class.get_all_cfg_fields())

        return m, o

    def __init__(self, monitor, name, cfg):
        # monitor: the Monitor this expected result belongs to.
        # name: the key under the monitor's `expected_results` config.
        # cfg: the (dict) configuration for this expected result.
        BasicConfigElement.__init__(self, cfg)

        self.monitor = monitor

        self.name = name

        self.normalize_fields()

        self.descr = cfg["descr"]

        # Instantiate one criterion object for each criterion name that
        # appears (non-None) in the config; at least one is required.
        self.criteria = []

        for criterion_class in CRITERIA_CLASSES:
            if self.cfg[criterion_class.CRITERION_NAME] is not None:
                self.criteria.append(criterion_class(self.cfg, self))

        if len(self.criteria) == 0:
            raise ConfigError("No criteria found.")

    def __str__(self):
        # Prefer the user-supplied description; otherwise join the
        # criteria's own descriptions.
        if self.descr:
            return self.descr
        else:
            ret = []
            for criterion in self.criteria:
                ret.append(str(criterion))
            if len(ret) > 0:
                return "; ".join(ret)
            else:
                return "No expected results"

    def display(self):
        # Print a human readable description of this expected result
        # and of each of its criteria.
        if self.descr:
            print("  {}: {}".format(self.name, self.descr))
        else:
            print("  {}".format(self.name))

        for criterion in self.criteria:
            print(criterion.display_string())

        print("")

    def result_matches(self, result):
        """Return True only if `result` satisfies ALL the criteria.

        A ResultProcessingError raised by a criterion is logged and
        treated as a mismatch (returns False), not propagated.
        """

        # result is a ripe.atlas.sagan result

        probe = self.monitor.get_probe(result)

        result_descr = "{}, {}".format(probe, result.created)

        for criterion in self.criteria:
            try:
                criterion.prepare(result)
                if not criterion.result_matches(result):
                    return False
            except ResultProcessingError as e:
                logger.warning(
                    "Error processing result {}: {}".format(
                        result_descr, str(e)
                    )
                )
                return False

        return True
/ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/ExpectedResult.py
0.895443
0.239077
ExpectedResult.py
pypi
import re

import six

from .Action import Action, ACTION_CLASSES
from .ExpectedResult import ExpectedResult
from .ExpResCriteria import CRITERIA_CLASSES, \
    CRITERIA_CLASSES_COMMON, \
    CRITERIA_CLASSES_TRACEROUTE, \
    CRITERIA_CLASSES_SSL, \
    CRITERIA_CLASSES_DNS
from .ExpResCriteriaDNS import ExpResCriterion_DNSAnswers, \
    ExpResCriterion_AnswersSection
from .ExpResCriteriaDNSRecords import ExpResCriterion_DNSRecord, \
    HANDLED_RECORD_TYPES
from .Monitor import Monitor
from .Rule import Rule


def format_docstring(docstring):
    # PEP 257 "trim" algorithm: normalize a docstring by removing the
    # common leading indentation and leading/trailing blank lines.
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count):
    indent = six.MAXSIZE
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < six.MAXSIZE:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)


def get_params(ds, c):
    # Parse the `param` definitions out of the (trimmed) docstring `ds`
    # of class `c` and cross-check them against the class' declared
    # config fields; raises ValueError on any inconsistency.
    params = []
    # list of (param_name, is_optional, is_list)

    like_a_param_pattern = re.compile("^`.+")
    param_pattern = re.compile("^`([\w_-]+)`\s?(\(optional\))?:\s(list)?")

    for line in ds.split("\n"):
        match = param_pattern.match(line)
        # A line that starts like a param but doesn't fully match the
        # expected format is a docstring mistake.
        if like_a_param_pattern.match(line) and not match:
            raise ValueError(
                "{}: it seems a param but it's not: {}".format(
                    c, line
                )
            )
        if match:
            params.append(
                (
                    match.group(1),
                    match.group(2) is not None,
                    match.group(3) is not None
                )
            )

    all_fields = []
    mandatory_fields = []
    optional_fields = []

    # CRITERION_NAME itself behaves as a mandatory field for criteria.
    if hasattr(c, "CRITERION_NAME"):
        mandatory_fields.append(c.CRITERION_NAME)
    if hasattr(c, "MANDATORY_CFG_FIELDS"):
        mandatory_fields.extend(getattr(c, "MANDATORY_CFG_FIELDS"))
    all_fields += mandatory_fields

    for param in params:
        if param[1] and param[0] in mandatory_fields:
            raise ValueError(
                "{}: param {} optional but it is mandatory".format(
                    c, param[0]
                )
            )

    if hasattr(c, "OPTIONAL_CFG_FIELDS"):
        optional_fields = getattr(c, "OPTIONAL_CFG_FIELDS")
        all_fields += optional_fields

    for param in params:
        if not param[1] and param[0] in optional_fields:
            raise ValueError(
                "{}: param {} mandatory but it is optional".format(c, param[0])
            )

    # Every declared field must be documented and vice versa.
    for field in all_fields:
        if field not in [param[0] for param in params]:
            raise ValueError("{}: undocumented field: {}".format(c, field))

    for param in params:
        if param[0] not in all_fields:
            raise ValueError("{}: unknown param: {}".format(c, param[0]))

    return params


def get_class_descr(ds_or_class):
    # First line of the (trimmed) docstring acts as the class' title.
    if isinstance(ds_or_class, str):
        return ds_or_class.split("\n")[0]
    else:
        ds = format_docstring(ds_or_class.__doc__)
        if ds:
            return get_class_descr(ds)
        else:
            return None


def dump_doc_title(s, lvl):
    # Render `s` as an rst heading of level `lvl` (1..5).
    HEADINGS = {
        "1": "=" * len(s),
        "2": "-" * len(s),
        "3": "*" * len(s),
        "4": "+" * len(s),
        "5": "`" * len(s)
    }
    return s + "\n" + HEADINGS[str(lvl)] + "\n"


def dump_doc(c, lvl):
    # Render class `c`'s docstring as an rst section at heading level
    # `lvl`; recurses into DNS record types for the DNSAnswers criterion.
    s = format_docstring(c.__doc__)

    title = get_class_descr(s)

    r = ""
    r += dump_doc_title(title, lvl) + "\n"

    lines = s.split("\n")[1:]

    # Simple state machine: `example` marks an indented yaml code block,
    # `parameters` a bullet list of config fields.
    example = False
    configuration_fields = False
    parameters = False
    for line in lines:
        line = line.replace("`", "``")

        if line == "":
            parameters = False
        else:
            if line.startswith("Example"):
                example = True
                r += "**{}**\n".format(line)
                r += "\n"
                r += ".. code:: yaml\n"
                continue
            elif line.startswith("`"):
                parameters = True
                if not configuration_fields:
                    r += "**Configuration fields:**\n\n"
                    configuration_fields = True
                r += "- "
            elif line.startswith("Available for:"):
                r += "**Available for**:\n\n"
                msm_types = line.split(":")[1].split(",")
                for msm_type in msm_types:
                    r += "- " + msm_type.strip().replace(".", "") + "\n\n"
                continue
            else:
                if example:
                    r += " "
                if parameters:
                    r += " "

        r += line + "\n"

    r += "\n"

    if c == ExpResCriterion_DNSAnswers:
        r += dump_doc(ExpResCriterion_DNSRecord, lvl+1)
        for subc in HANDLED_RECORD_TYPES:
            r += dump_doc(subc, lvl+1)

    return r


def build_doc():
    # Build the whole "Monitor configuration syntax" rst document.
    r = dump_doc_title("Monitor configuration syntax", 1) + "\n"
    r += ".. contents::\n\n"
    r += dump_doc(Monitor, 2)
    r += dump_doc(Rule, 2)
    r += dump_doc(ExpectedResult, 2)

    criteria = (
        ("Common criteria", CRITERIA_CLASSES_COMMON),
        ("Traceroute criteria", CRITERIA_CLASSES_TRACEROUTE),
        ("SSL criteria", CRITERIA_CLASSES_SSL),
        ("DNS criteria", CRITERIA_CLASSES_DNS)
    )

    # Sanity check: every criteria class must be covered by one group.
    cls_cnt = 0
    for group, classes in criteria:
        r += dump_doc_title(group, 3) + "\n"
        for subc in classes:
            cls_cnt += 1
            r += dump_doc(subc, 4)

    if cls_cnt != len(CRITERIA_CLASSES):
        raise ValueError(
            "One or more criteria classes have not been processed. "
            "{} processed vs {} total.".format(cls_cnt, len(CRITERIA_CLASSES))
        )

    r += dump_doc(Action, 2)

    for subc in ACTION_CLASSES:
        r += dump_doc(subc, 3)

    return r


def dump_yaml(c, indent="", show_doc=True, elements_type="",
              comment_optional=True, index=0):
    # Render a commented YAML configuration template for class `c`.
    # elements_type: "" (scalar context), "dict" (keyed entry) or
    # "list" (list item); comment_optional prefixes optional fields
    # with "#"; index numbers repeated sample entries.
    INDENT = " "

    r = ""

    s = format_docstring(c.__doc__)

    if not s:
        return ""

    if show_doc:
        # print the whole docstring
        r += indent + "# {}\n".format("="*75)
        for line in s.split("\n"):
            r += indent + "# " + line + "\n"
        r += "\n"

    params = get_params(s, c)

    first_list_element = True

    class_description = get_class_descr(s)

    if elements_type == "dict":
        dict_key = class_description.replace(" ", "_")
        r += indent + "{}_{}:\n".format(dict_key, index+1)
    elif elements_type == "list":
        list_comment = class_description
        r += indent + "# {} n. {}\n".format(list_comment, index+1)

    if not show_doc:
        if c in HANDLED_RECORD_TYPES or c in ACTION_CLASSES:
            r += indent + "# {}\n".format(class_description)

    prefix = ""
    if elements_type == "dict":
        prefix = INDENT

    for param in params:
        if elements_type == "list":
            if first_list_element:
                r += indent + "- \n"
                first_list_element = False
            prefix = " "

        r += indent + "{prefix}{comment}{param}: \n".format(
            prefix=prefix,
            comment="#" if comment_optional and param[1] else "",
            param=param[0]
        )

        # Recurse into the nested structures of the Monitor template.
        if c == Monitor and param[0] == "expected_results":
            for i in range(2):
                r += dump_yaml(ExpectedResult, indent=indent + INDENT,
                               elements_type="dict",
                               show_doc=show_doc and i == 0,
                               comment_optional=comment_optional, index=i)
        elif c == Monitor and param[0] == "matching_rules":
            for i in range(2):
                r += dump_yaml(Rule, indent=indent,
                               elements_type="list",
                               show_doc=show_doc and i == 0,
                               comment_optional=comment_optional, index=i)
        elif c == Monitor and param[0] == "actions":
            for i in range(2):
                r += dump_yaml(Action, indent=indent + INDENT,
                               elements_type="dict",
                               show_doc=show_doc and i == 0,
                               comment_optional=comment_optional, index=i)
        else:
            # List-typed params get 3 sample entries.
            if param[2]:
                for i in range(3):
                    param_name = param[0]
                    if c == Rule and param_name == "expected_results":
                        param_name = get_class_descr(ExpectedResult)
                    if c == Rule and param_name == "actions":
                        param_name = get_class_descr(Action)
                    param_name = param_name.replace(" ", "_")
                    r += indent + "{prefix}{comment}- {param}_{i}\n".format(
                        comment="#" if comment_optional and param[1] else "",
                        prefix=prefix,
                        param=param_name,
                        i=i+1
                    )

    r += "\n"

    if c == ExpectedResult:
        r += indent + prefix + "# one or more of the following criteria\n\n"
        for subc in CRITERIA_CLASSES:
            r += dump_yaml(subc, indent=indent + INDENT,
                           show_doc=show_doc,
                           comment_optional=comment_optional, index=index)
    elif c == Action:
        r += indent + prefix + ("# one or more of the following "
                                "action-specific parameters\n\n")
        for subc in ACTION_CLASSES:
            r += dump_yaml(subc, indent=indent + INDENT,
                           show_doc=show_doc,
                           comment_optional=comment_optional, index=index)
    elif c == ExpResCriterion_DNSAnswers:
        for i in range(2):
            r += dump_yaml(ExpResCriterion_AnswersSection,
                           indent=indent + INDENT,
                           elements_type="dict",
                           show_doc=show_doc and i == 0,
                           comment_optional=comment_optional, index=i)
    elif c == ExpResCriterion_AnswersSection:
        for i in range(2):
            r += dump_yaml(ExpResCriterion_DNSRecord,
                           indent=indent + INDENT,
                           elements_type="list",
                           show_doc=show_doc and i == 0,
                           comment_optional=comment_optional, index=i)
    elif c == ExpResCriterion_DNSRecord:
        r += indent + prefix + ("# one of the following "
                                "record-specific parameters\n\n")
        for subc in HANDLED_RECORD_TYPES:
            r += dump_yaml(subc, indent=indent + prefix,
                           show_doc=show_doc,
                           comment_optional=comment_optional, index=index)

    return r


def build_monitor_cfg_tpl(comment_optional=True, show_doc=True):
    # Build the full YAML configuration template, starting from Monitor.
    r = ""
    r += dump_yaml(Monitor, comment_optional=comment_optional,
                   show_doc=show_doc)
    return r


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Doc builder for RIPE Atlas Monitor"
    )

    subparsers = parser.add_subparsers(
        title="commands",
        dest="command"
    )

    sub_parser = subparsers.add_parser(
        "doc",
        help="Build monitor doc"
    )

    sub_parser = subparsers.add_parser(
        "cfg",
        help="Build monitor config template"
    )
    sub_parser.add_argument(
        "--no-comment",
        action="store_false",
        help="Do not comment optional fields",
        dest="comment_optional"
    )

    args = parser.parse_args()

    if args.command == "doc":
        print(build_doc())
    elif args.command == "cfg":
        print(build_monitor_cfg_tpl(comment_optional=args.comment_optional))
/ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/doc.py
0.59796
0.168651
doc.py
pypi
import json

from . import utils


class ShopifyCommons(object):

    PRODUCT_TYPE = "Platforme"
    """ The type to use when creating a custom product, should
    properly identify it from the remaining products in the
    catalog (dummy product) """

    PRODUCT_TAGS = "platforme"
    """ The comma-separated tags to use when creating a
    customizable product """

    @classmethod
    def build_shopify_product(cls, **kwargs):
        """
        Generates a product payload (dictionary) ready to be used
        for the creation of an equivalent Shopify product.

        The structure of the payload must respect the equivalent
        structure of the product under the Shopify API.
        """

        # builds the context object to be stored in the Shopify
        # product's metafields
        context = dict(
            brand=kwargs.get("brand"),
            model=kwargs.get("model"),
            variant=kwargs.get("variant"),
            version=kwargs.get("version"),
            parts=kwargs.get("parts"),
            initials_extra=kwargs.get("initials_extra"),
            scale=kwargs.get("scale"),
            gender=kwargs.get("gender"),
            size=kwargs.get("size"),
            currency=kwargs.get("currency"),
            country=kwargs.get("country"),
            flag=kwargs.get("flag"),
        )

        # computes the values for the shopify product payload using the
        # appropriate 3DB business logic overrides or the default
        # implementations
        title = utils.try_execute_method(
            "build_shopify_title", fallback=cls.build_shopify_title, **kwargs
        )
        price = utils.try_execute_method(
            "build_shopify_price", fallback=cls.build_shopify_price, **kwargs
        )
        size_scaled = utils.try_execute_method(
            "build_shopify_size", fallback=cls.build_shopify_size, **kwargs
        )
        scale = utils.try_execute_method(
            "build_shopify_scale", fallback=cls.build_shopify_scale, **kwargs
        )
        gender = utils.try_execute_method(
            "build_shopify_gender", fallback=cls.build_shopify_gender, **kwargs
        )
        sku = utils.try_execute_method(
            "build_shopify_sku", fallback=cls.build_shopify_sku, **kwargs
        )
        customized_variant = utils.try_execute_method(
            "build_shopify_variant", fallback=cls.build_shopify_variant, **kwargs
        )

        # builds the options and customized variant dictionary to
        # use in the returned payload; "option%d" keys follow the
        # order in which the options are appended
        options = []
        if size_scaled:
            options.append(dict(name="Size"))
            customized_variant["option%d" % len(options)] = size_scaled
            context["size_scaled"] = size_scaled
        if scale:
            options.append(dict(name="Scale"))
            customized_variant["option%d" % len(options)] = scale
        if gender:
            options.append(dict(name="Gender"))
            customized_variant["option%d" % len(options)] = gender
        # values already set by a build_shopify_variant override take
        # precedence over the computed price/sku
        if price and "price" not in customized_variant:
            customized_variant["price"] = price
        if sku and "sku" not in customized_variant:
            customized_variant["sku"] = sku
        if "sku" in customized_variant:
            context["sku"] = customized_variant["sku"]

        return dict(
            title=title,
            options=options,
            variants=[customized_variant],
            metafields=[
                dict(namespace="seo", key="hidden", value=1, type="integer"),
                dict(
                    namespace="platforme",
                    key="context",
                    value=json.dumps(context),
                    type="json_string",
                ),
            ],
            product_type=cls.PRODUCT_TYPE,
            tags=cls.PRODUCT_TAGS,
        )

    @classmethod
    def build_shopify_title(cls, product_title=None, original_product=None, **kwargs):
        """Returns the title, preferring an explicit one over the
        original product's title."""
        if product_title:
            return product_title
        if original_product:
            return original_product["title"]
        return None

    @classmethod
    def build_shopify_price(
        cls,
        ripe_api=None,
        brand=None,
        model=None,
        parts=None,
        variant=None,
        version=None,
        initials_extra=None,
        currency=None,
        country=None,
        flag=None,
        **kwargs
    ):
        """Computes the final price of the configuration through the
        RIPE API's price engine."""
        price = ripe_api.price_config(
            brand=brand,
            model=model,
            p=parts,
            variant=variant,
            version=version,
            initials_extra=initials_extra,
            currency=currency,
            country=country,
            flag=flag,
        )
        return price["total"]["price_final"]

    @classmethod
    def build_shopify_size(
        cls, ripe_api=None, size=None, scale=None, gender=None, size_scaled=None, **kwargs
    ):
        """Converts the native size to the scaled representation,
        dropping a trailing ".0" for whole number sizes."""
        if size_scaled:
            return size_scaled
        if scale == "one_size":
            return "One Size"
        if not scale or not size or not gender or not ripe_api:
            return None
        size_scaled = ripe_api.native_to_size(scale, size, gender)["value"]
        is_decimal = not size_scaled % 1 == 0
        return size_scaled if is_decimal else int(size_scaled)

    @classmethod
    def build_shopify_scale(cls, scale=None, **kwargs):
        """Returns the (upper-cased) scale, or None when not set."""
        if not scale:
            return None
        return scale.upper()

    @classmethod
    def build_shopify_gender(cls, gender=None, **kwargs):
        """Returns the (capitalized) gender, or None when not set."""
        if not gender:
            return None
        return gender.capitalize()

    @classmethod
    def build_shopify_sku(
        cls,
        ripe_api=None,
        brand=None,
        model=None,
        variant=None,
        parts=None,
        initials_extra=None,
        size=None,
        gender=None,
        **kwargs
    ):
        """Resolves the SKU for the configuration via the RIPE API;
        returns None when no SKU rule matches (HTTP 400)."""
        try:
            sku_config = ripe_api.sku_config(
                brand=brand,
                model=model,
                variant=variant,
                p=parts,
                initials_extra=initials_extra,
                size=size,
                gender=gender,
            )
            sku = sku_config.get("sku")
            return sku
        except Exception as err:
            # bug fix: not every exception carries an HTTP status `code`
            # attribute; the previous `err.code` access raised an
            # AttributeError for such errors, masking the original one -
            # getattr() re-raises them unchanged instead
            if getattr(err, "code", None) != 400:
                raise
        return None

    @classmethod
    def build_shopify_variant(cls, **kwargs):
        """Default implementation: an empty variant dictionary, meant
        to be overridden by brand-specific logic."""
        return dict()
/ripe-commons-logic-0.2.4.tar.gz/ripe-commons-logic-0.2.4/src/ripe_commons_logic/shopify.py
0.722918
0.297942
shopify.py
pypi
<h1><a href="https://tech.platforme.com"><img src="res/logo.svg" alt="RIPE Rainbow" height="60" style="height: 60px;"></a></h1> **Bringing happiness to the RIPE world through testing 🌈** RIPE Rainbow is a simple automation test framework for the RIPE world. ## Installation ```bash pip install ripe-rainbow ``` ## Execution ```bash rainbow ``` ## Configuration | Name | Type | Default | Description | | ------------------------ | ------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | | **LEVEL** | `str` | `INFO` | Controls the verbosity level of the attached logger. | | **SILENT** | `bool` | `False` | If the test execution should run under silent mode (no stdout from logs). | | **FILTER** | `str` | `None` | The filter regex to be used by some of the loaders. | | **DRIVER** | `str` | `selenium` | The driver to be used for the interactive mode. | | **TIMEOUT** | `int` | `15` | The timeout in seconds to be used by default for interactions under the interactive testing mode. | | **REPEAT** | `int` | `1` | The number of times to repeat the execution of the tests. | | **TESTS_PATH** | `str` | `.` | The base path to be used in the loading of the test cases. | | **PROVISION** | `bool` | `True` | If the provision operations should be performed. | | **STACKTRACES** | `bool` | `False` | If "stacktrace" log should be stored on failure of tests. | | **STACKTRACES_PATH** | `bool` | `.` | The base path to be used to save the stacktraces log. | | **SCREENSHOTS** | `bool` | `False` | If screenshots should be save on failure of tests. | | **SCREENSHOTS_PATH** | `bool` | `.` | The base path to be used to save the screenshots. | | **STORE_LOGS** | `bool` | `False` | If the log files from the multiple logs should be store in case of test failure. | | **LOGS_PATH** | `bool` | `.` | The base path to be used to save the log files on failure. 
| | **SEL_SECURE** | `bool` | `False` | If the [Selenium](https://www.seleniumhq.org) engine should be executed under a secure approach (should be slower). | | **SEL_BROWSER** | `str` | `chrome` | The browser engine that is going to be used by Selenium (eg: `chrome`, `firefox`). | | **SEL_BROWSER_CACHE** | `bool` | `True` | If the [Selenium](https://www.seleniumhq.org) driver should be with browser cache enabled. | | **SEL_FIX_PATH** | `bool` | `True` | If the [Selenium](https://www.seleniumhq.org) driver should try to fix the environment path. | | **SEL_MAXIMIZED** | `bool` | `False` | If the [Selenium](https://www.seleniumhq.org) driver should be started in "maximized" (window) mode. | | **SEL_HEADLESS** | `bool` | `False` | If the [Selenium](https://www.seleniumhq.org) driver should be started in "headless" (window) mode. | | **SEL_DEVICE** | `str` | `None` | If a specific device should be emulated by changing some internal settings (eg: user-agent in usage), possible values include: `iphone10`, `nexus5` | | **SEL_WINDOW_SIZE** | `str` | `1920x1080` | Resolution (in pixels) that the [Selenium](https://www.seleniumhq.org) driver will use for the window. | | **SEL_PIXEL_RATIO** | `int` | `1` | The pixel ratio that the [Selenium](https://www.seleniumhq.org) driver will use, should be used mostly for device testing. | | **SEL_MOBILE_EMULATION** | `bool` | `False` | If the [Selenium](https://www.seleniumhq.org) driver should use the mobile emulation mode (available for Chrome). | | **SEL_SERVICE_ARGS** | `list` | `[]` | List of command line args to be passed to the driver service that interacts with the browser. | | **SEL_POLL_FREQUENCY** | `float` | `None` | The frequency (in seconds) to run the [busy waiting](https://en.wikipedia.org/wiki/Busy_waiting) polling operation on the Selenium `wait` operation. | | **RIPE_ID_USERNAME** | `str` | `None` | The username to be used for the RIPE ID authentication. 
| | **RIPE_ID_PASSWORD** | `str` | `None` | The password to be used for the RIPE ID authentication. | | Name | Type | Default | Description | | ------------------------ | ----- | ----------------------- | ------------------------------------------------------------------------------------------------------------------------- | | **RIPE_SUFFIX** | `str` | `None` | If defined the RIPE product URLs are suffixed with this value (eg: `sbx` implies `https://ripe-pulse-sbx.platforme.com`). | | **RIPE_CORE_URL** | `str` | `http://localhost:8080` | The base URL to the RIPE Core instance to be used for tests. | | **CORE_URL** | `str` | `http://localhost:8080` | Same as `RIPE_CORE_URL`. | | **RIPE_CORE_USERNAME** | `str` | `root` | The username of an admin user to be used to access RIPE Core, it is also used in the provision operation. | | **CORE_USERNAME** | `str` | `root` | Same as `RIPE_CORE_USERNAME`. | | **RIPE_CORE_PASSWORD** | `str` | `root` | The password of an admin user to be used to access RIPE Core, it is also used in the provision operation. | | **CORE_PASSWORD** | `str` | `root` | Same as `RIPE_CORE_PASSWORD`. | | **RIPE_RETAIL_URL** | `str` | `http://localhost:8080` | The base URL to the RIPE Retail instance to be used for tests. | | **RETAIL_URL** | `str` | `http://localhost:8080` | Same as `RIPE_RETAIL_URL`. | | **RIPE_RETAIL_USERNAME** | `str` | `root` | The username of an admin user to be used to access RIPE Retail, it is also used in the provision operation. | | **RETAIL_USERNAME** | `str` | `root` | Same as `RIPE_RETAIL_USERNAME`. | | **RIPE_RETAIL_PASSWORD** | `str` | `root` | The password of an admin user to be used to access RIPE Retail, it is also used in the provision operation. | | **RETAIL_PASSWORD** | `str` | `root` | Same as `RIPE_RETAIL_PASSWORD`. | | **RETAIL_URL** | `str` | `http://localhost:8080` | Same as `RIPE_RETAIL_URL`. 
| | **RIPE_PULSE_URL** | `str` | `http://localhost:3000` | The base URL to the RIPE Pulse instance to be used for tests. | | **PULSE_URL** | `str` | `http://localhost:3000` | Same as `RIPE_PULSE_URL`. | | **RIPE_COPPER_URL** | `str` | `http://localhost:3000` | The base URL to the RIPE Copper instance to be used for tests. | | **COPPER_URL** | `str` | `http://localhost:3000` | Same as `RIPE_COPPER_URL`. | | **RIPE_UTIL_VUE_URL** | `str` | `http://localhost:3000` | The base URL to the RIPE Util Vue instance to be used for tests. | | **UTIL_VUE_URL** | `str` | `http://localhost:3000` | Same as `RIPE_UTIL_VUE_URL`. | | **RIPE_WHITE_URL** | `str` | `http://localhost:3000` | The base URL to the RIPE White instance to be used for tests. | | **WHITE_URL** | `str` | `http://localhost:3000` | Same as `RIPE_WHITE_URL`. | ## License RIPE Rainbow is currently licensed under the [Apache License, Version 2.0](http://www.apache.org/licenses/). ## Build Automation [![Build Status](https://app.travis-ci.com/ripe-tech/ripe-rainbow.svg?branch=master)](https://travis-ci.com/github/ripe-tech/ripe-rainbow) [![Build Status GitHub](https://github.com/ripe-tech/ripe-rainbow/workflows/Main%20Workflow/badge.svg)](https://github.com/ripe-tech/ripe-rainbow/actions) [![Coverage Status](https://coveralls.io/repos/ripe-tech/ripe-rainbow/badge.svg?branch=master)](https://coveralls.io/r/ripe-tech/ripe-rainbow?branch=master) [![PyPi Status](https://img.shields.io/pypi/v/ripe-rainbow.svg)](https://pypi.python.org/pypi/ripe-rainbow) [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://www.apache.org/licenses/)
/ripe-rainbow-0.10.3.tar.gz/ripe-rainbow-0.10.3/README.md
0.644113
0.983279
README.md
pypi
import appier from .. import parts class RipeWhitePart(parts.Part): def authorize(self): self.id.authorize() def select_size(self, size, gender=None, scale=None, open=True, wait_closed=True): """ Opens the size selection window, selects the proper scale and size and applies that configuration by clicking 'Apply' and closing the window. Notice that if the "open" flag is unset the window is not opened. :type size: String :param size: The size to be picked. :type gender: String :param gender: The gender that is going to be picked. :type scale: String :param scale: The scale that is going to be picked. :type open: Boolean :param open: If the size modal window should be opened before selection. :type wait_closed: Boolean :param wait_closed: Whether it should wait for the size modal to be closed, not waiting for the closing of the modal should improve performance. """ if open: self.interactions.click(".content .size:not(.disabled) > .button-size") if gender: self.interactions.click(".size .button-gender", text=gender) if scale: self.interactions.click(".size .button-scale", text=str(scale)) self.interactions.click(".size .sizes .button-size", text=str(size)) self.interactions.click(".content .size .button.button-apply") if wait_closed: self.waits.not_visible(".content .size .modal") def select_size_mobile(self, size, gender=None, scale=None, wait_closed=True): """ Opens the size selection window, selects the proper scale and size and applies that configuration by clicking 'Apply' and closing the window. Notice that if the "open" flag is unset the window is not opened. This method is aimed at the mobile version of the size selector. :type size: String :param size: The size to be picked. :type gender: String :param gender: The gender that is going to be picked. :type scale: String :param scale: The scale that is going to be picked. :type open: Boolean :param open: If the size modal window should be opened before selection. 
:type wait_closed: Boolean :param wait_closed: Whether it should wait for the size modal to be closed, not waiting for the closing of the modal should improve performance. """ if gender: self.interactions.click(".size .button-gender", text=gender) if scale: self.interactions.click(".size .button-scale", text=str(scale)) self.interactions.click(".size .sizes .button-size", text=str(size)) self.interactions.click(".content-mobile .size .button.button-apply") if wait_closed: self.waits.not_visible(".content-mobile .size .modal") def select_part(self, part): self.interactions.click(".content .pickers .button-part[data-part='%s']" % part) def select_part_mobile(self, part): self.interactions.click( ".content-mobile .pickers .button-part[data-part='%s']" % part ) def select_material(self, material): self.interactions.click( ".content .pickers .button-material[data-material='%s']" % material ) def select_material_mobile(self, material): self.interactions.click( ".content-mobile .pickers .button-material[data-material='%s']" % material ) def select_color(self, material, color): self.interactions.click( ".content .pickers .button-color-option[data-material='%s'][data-color='%s']" % (material, color) ) def select_color_mobile(self, material, color): self.interactions.click( ".content-mobile .pickers .button-color-option[data-material='%s'][data-color='%s']" % (material, color) ) def assert_no_part(self, part, timeout=None): self.waits.not_visible( ".content .pickers .button-part > p", text=self._capitalize_words(part), message="The selector for the part '%s' didn't disappear" % part, timeout=timeout, ) def assert_no_part_mobile(self, part, timeout=None): self.waits.not_visible( ".content-mobile .pickers .button-part > p", text=self._capitalize_words(part), message="The selector for the part '%s' didn't disappear" % part, timeout=timeout, ) def assert_no_material(self, part, material): self.select_part(part) self.waits.not_visible(".material li[data-material='%s']" % 
material) self.waits.not_visible( ".content .pickers .button-color[data-material='%s']" % material ) def assert_no_material_mobile(self, part, material): self.select_part_mobile(part) self.waits.not_visible(".material li[data-material='%s']" % material) self.waits.not_visible( ".content-mobile .pickers .button-color[data-material='%s']" % material ) def assert_no_color(self, part, color): self.select_part(part) self.waits.not_visible(".content .pickers .button-color[data-color='%s']" % color) def assert_no_color_mobile(self, part, color): self.select_part_mobile(part) self.waits.not_visible( ".content-mobile .pickers .button-color[data-color='%s']" % color ) def set_part( self, brand, model, part, material, color, part_text=None, material_text=None, color_text=None, verify=True, has_swatch=True, ): """ Makes a change to the customization of a part and checks that the pages mutates correctly, picking the right active parts, materials and colors, as well as properly switching the swatches. If the text parameters are passed an extra set of assertions are going to be performed to validate expected behaviour. :type brand: String :param brand: The brand of the model being customized. :type model: String :param model: The model being customized. :type part: String :param part: The technical name of the part being changed. :type material: String :param material: The technical name of the material to use for the part. :type color: String :param color: The technical name of the color to use for the part. :type part_text: String :param part_text: The expected label for the part after clicking. :type material_text: String :param material_text: The expected label for the material after clicking. :type color_text: String :param color_text: The expected label for the color after clicking. :type verify: bool :param verify: If a final assertion should be performed after the selection has been done (to verify the final status). 
:type has_swatch: Boolean :param has_swatch: Whether there should be a swatch. """ self.select_part(part) self.select_material(material) self.select_color(material, color) if verify: self.assert_part( brand, model, part, material, color, part_text=part_text, material_text=material_text, color_text=color_text, has_swatch=has_swatch, select_part=False, ) def set_part_mobile( self, brand, model, part, material, color, part_text=None, material_text=None, color_text=None, verify=True, has_swatch=True, ): """ Makes a change to the customization of a part and checks that the pages mutates correctly, picking the right active parts, materials and colors, as well as properly switching the swatches. If the text parameters are passed an extra set of assertions are going to be performed to validate expected behaviour. This method is aimed at the mobile version of the parts selector. :type brand: String :param brand: The brand of the model being customized. :type model: String :param model: The model being customized. :type part: String :param part: The technical name of the part being changed. :type material: String :param material: The technical name of the material to use for the part. :type color: String :param color: The technical name of the color to use for the part. :type part_text: String :param part_text: The expected label for the part after clicking. :type material_text: String :param material_text: The expected label for the material after clicking. :type color_text: String :param color_text: The expected label for the color after clicking. :type verify: bool :param verify: If a final assertion should be performed after the selection has been done (to verify the final status). :type has_swatch: Boolean :param has_swatch: Whether there should be a swatch. 
""" self.select_part_mobile(part) self.select_material_mobile(material) self.select_color_mobile(material, color) if verify: self.assert_part_mobile( brand, model, part, material, color, part_text=part_text, material_text=material_text, color_text=color_text, has_swatch=has_swatch, select_part=False, ) def assert_part( self, brand, model, part, material, color, part_text=None, material_text=None, color_text=None, has_swatch=True, select_part=True, ): """ Checks that the part pickers have the expected state, meaning that the complete set of assertions are properly filled. If the text parameters are passed an extra set of assertions are going to be performed to validate expected behaviour. Notice that this assertion requires the changing of the current visual state, in the sense that the part tab is going to be switched to the one that is going to be asserted. :type brand: String :param brand: The brand of the model being customized. :type model: String :param model: The model being customized. :type part: String :param part: The technical name of the part being checked. :type material: String :param material: The technical name of the material used in the part. :type color: String :param color: The technical name of the color used in the part. :type part_text: String :param part_text: The expected label for the part. :type material_text: String :param material_text: The expected label for the material. :type color_text: String :param color_text: The expected label for the color. :type has_swatch: Boolean :param has_swatch: Whether there should be a swatch. :type select_part: Boolean :param select_part: If it's true then the part that is being asserted is clicked before the assertions start. This is mandatory when the part is not selected, but unnecessary otherwise. Using this option may imply performance degradation as the part selection incurs animation. 
""" if select_part: self.select_part(part) if part_text: self.waits.visible(".button-part.active", text=part_text) if color_text: self.waits.visible(".button-color-option.active", text=color_text) if material_text: self.waits.visible(".button-material.active", text=material_text) if has_swatch: self.waits.until( lambda d: self.core.assert_swatch( ".content .pickers .button-part.active .swatch > img", brand, model, material, color, ), "Part swatch didn't have the expected image", ) self.waits.until( lambda d: self.core.assert_swatch( ".content .pickers .button-color-option.active .swatch > img", brand, model, material, color, ), "Color swatch didn't have the expected image", ) def assert_part_mobile( self, brand, model, part, material, color, part_text=None, material_text=None, color_text=None, has_swatch=True, select_part=True, ): """ Checks that the part pickers have the expected state, meaning that the complete set of assertions are properly filled. If the text parameters are passed an extra set of assertions are going to be performed to validate expected behaviour. Notice that this assertion requires the changing of the current visual state, in the sense that the part tab is going to be switched to the one that is going to be asserted. This method is aimed at the mobile version of the parts selector. :type brand: String :param brand: The brand of the model being customized. :type model: String :param model: The model being customized. :type part: String :param part: The technical name of the part being checked. :type material: String :param material: The technical name of the material used in the part. :type color: String :param color: The technical name of the color used in the part. :type part_text: String :param part_text: The expected label for the part. :type material_text: String :param material_text: The expected label for the material. :type color_text: String :param color_text: The expected label for the color. 
:type has_swatch: Boolean :param has_swatch: Whether there should be a swatch. :type select_part: Boolean :param select_part: If it's true then the part that is being asserted is clicked before the assertions start. This is mandatory when the part is not selected, but unnecessary otherwise. Using this option may imply performance degradation as the part selection incurs animation. """ if select_part: self.select_part_mobile(part) if part_text: self.waits.visible(".content-mobile .button-part.active", text=part_text) if color_text: self.waits.visible( " .content-mobile .button-color-option.active", text=color_text ) if material_text: self.waits.visible( ".content-mobile .button-material.active", text=material_text ) if has_swatch: self.waits.until( lambda d: self.core.assert_swatch( ".content-mobile .pickers .button-part.active .swatch > img", brand, model, material, color, ), "Part swatch didn't have the expected image", ) self.waits.until( lambda d: self.core.assert_swatch( ".content-mobile .pickers .button-color-option.active .swatch > img", brand, model, material, color, ), "Color swatch didn't have the expected image", ) def url_model(self, model, brand): return "%s/?model=%s&brand=%s" % (self.white_url, model, brand) def url_product_id(self, product_id): return "%s/?product_id=%s" % (self.white_url, product_id) @property def base_url(self): return self.white_url @property def home_url(self): return "%s/" % self.white_url @property def next_url(self): return self.home_url @property def white_url(self): ripe_suffix = appier.conf("RIPE_SUFFIX", None) if ripe_suffix: white_url = "https://ripe-white-%s.platforme.com" % ripe_suffix else: white_url = "http://localhost:3000" white_url = appier.conf("BASE_URL", white_url) white_url = appier.conf("WHITE_URL", white_url) white_url = appier.conf("RIPE_WHITE_URL", white_url) return white_url def _capitalize_words(self, sentence): return " ".join(map(lambda s: s.capitalize(), sentence.split(" ")))
/ripe-rainbow-0.10.3.tar.gz/ripe-rainbow-0.10.3/src/ripe_rainbow/domain/logic/ripe_white.py
0.742235
0.329823
ripe_white.py
pypi
import appier

from .. import parts


class RipeRetailPart(parts.Part):
    """
    Domain logic part for the RIPE Retail application, wrapping the
    login flow, the size/part/color selection interactions and the
    UI level assertions used by the test suite.
    """

    def login(self, username, password):
        # opens the login page, types both credentials into the form
        # and submits it by pressing enter on the password field
        self.interactions.goto_url(self.login_url)
        self.interactions.write_text(".form input[name='username']", username)
        self.interactions.write_text(".form input[name='password']", password)
        self.interactions.press_enter(".form input[name='password']")

    def login_wait(self, username, password):
        # performs the login and then blocks until the browser is
        # effectively redirected to the expected post-login URL
        self.login(username, password)
        self.waits.redirected_to(self.next_url)

    def select_size(self, size, gender=None, scale=None, open=True):
        """
        Opens the size selection window, selects the proper scale and
        size and applies that configuration by clicking 'Apply' and
        closing the window.

        Notice that if the "open" flag is unset the window is not opened.

        :type size: String
        :param size: The size to be picked.
        :type gender: String
        :param gender: The gender that is going to be picked.
        :type scale: String
        :param scale: The scale that is going to be picked.
        :type open: Boolean
        :param open: If the size modal window should be opened before selection.
        """

        if open:
            self.interactions.click(".size:not(.disabled) > .button-size")
        if gender:
            self.interactions.click(".size .button-gender", text=gender)
        if scale:
            self.interactions.click(".size .button-scale", text=str(scale))
        self.interactions.click(".size .button-size", text=str(size))
        self.interactions.click(".size .button.button-apply")
        self.waits.not_visible(".size .modal")

    def select_part(self, part):
        self.interactions.click(".pickers .button-part[data-part='%s']" % part)

    def select_color(self, material, color):
        self.interactions.click(
            ".pickers .button-color-option[data-material='%s'][data-color='%s']"
            % (material, color)
        )

    def assert_no_part(self, part, timeout=None):
        self.waits.not_visible(
            ".pickers .button-part > p",
            text=part.upper(),
            message="The selector for the part '%s' didn't disappear" % part,
            timeout=timeout,
        )

    def assert_no_material(self, part, material):
        self.select_part(part)
        self.waits.not_visible(".material li[data-material='%s']" % material)
        self.waits.not_visible(".pickers .button-color[data-material='%s']" % material)

    def assert_no_color(self, part, color):
        self.select_part(part)
        self.waits.not_visible(".pickers .button-color[data-color='%s']" % color)

    def set_part(
        self,
        brand,
        model,
        part,
        material,
        color,
        part_text=None,
        material_text=None,
        color_text=None,
        verify=True,
        has_swatch=True,
    ):
        """
        Makes a change to the customization of a part and checks that the
        pages mutates correctly, picking the right active parts, materials
        and colors, as well as properly switching the swatches.

        If the text parameters are passed an extra set of assertions are
        going to be performed to validate expected behaviour.

        :type brand: String
        :param brand: The brand of the model being customized.
        :type model: String
        :param model: The model being customized.
        :type part: String
        :param part: The technical name of the part being changed.
        :type material: String
        :param material: The technical name of the material to use for the part.
        :type color: String
        :param color: The technical name of the color to use for the part.
        :type part_text: String
        :param part_text: The expected label for the part after clicking.
        :type material_text: String
        :param material_text: The expected label for the material after clicking.
        :type color_text: String
        :param color_text: The expected label for the color after clicking.
        :type verify: bool
        :param verify: If a final assertion should be performed after the
        selection has been done (to verify the final status).
        :type has_swatch: Boolean
        :param has_swatch: Whether there should be a swatch.
        """

        self.select_part(part)
        self.select_color(material, color)
        if not verify:
            return
        self.assert_part(
            brand,
            model,
            part,
            material,
            color,
            part_text=part_text,
            material_text=material_text,
            color_text=color_text,
            has_swatch=has_swatch,
        )

    def assert_part(
        self,
        brand,
        model,
        part,
        material,
        color,
        part_text=None,
        material_text=None,
        color_text=None,
        has_swatch=True,
    ):
        """
        Checks that the part pickers have the expected state, meaning that
        the complete set of assertions are properly filled.

        If the text parameters are passed an extra set of assertions are
        going to be performed to validate expected behaviour.

        Notice that this assertion requires the changing of the current
        visual state, in the sense that the part tab is going to be
        switched to the one that is going to be asserted.

        :type brand: String
        :param brand: The brand of the model being customized.
        :type model: String
        :param model: The model being customized.
        :type part: String
        :param part: The technical name of the part being checked.
        :type material: String
        :param material: The technical name of the material used in the part.
        :type color: String
        :param color: The technical name of the color used in the part.
        :type part_text: String
        :param part_text: The expected label for the part.
        :type material_text: String
        :param material_text: The expected label for the material.
        :type color_text: String
        :param color_text: The expected label for the color.
        :type has_swatch: Boolean
        :param has_swatch: Whether there should be a swatch.
        """

        self.select_part(part)
        if part_text:
            self.waits.visible(".button-part.active", text=part_text)
        if color_text:
            self.waits.visible(".button-color-option.active", text=color_text)
        if material_text:
            self.waits.visible(".button-material.active", text=material_text)
        if not has_swatch:
            return
        self.waits.until(
            lambda d: self.core.assert_swatch(
                ".pickers .button-part.active .swatch > img",
                brand,
                model,
                material,
                color,
            ),
            "Part swatch didn't have the expected image",
        )
        self.waits.until(
            lambda d: self.core.assert_swatch(
                ".pickers .button-color-option.active .swatch > img",
                brand,
                model,
                material,
                color,
            ),
            "Color swatch didn't have the expected image",
        )

    @property
    def retail_url(self):
        # resolves the base RIPE Retail URL, the later configuration
        # variables take precedence over the earlier ones
        suffix = appier.conf("RIPE_SUFFIX", None)
        url = (
            "https://ripe-retail-%s.platforme.com" % suffix
            if suffix
            else "http://localhost:8080"
        )
        url = appier.conf("BASE_URL", url)
        url = appier.conf("RETAIL_URL", url)
        url = appier.conf("RIPE_RETAIL_URL", url)
        return url

    @property
    def username(self):
        value = appier.conf("RETAIL_USERNAME", "root")
        return appier.conf("RIPE_RETAIL_USERNAME", value)

    @property
    def password(self):
        value = appier.conf("RETAIL_PASSWORD", "root")
        return appier.conf("RIPE_RETAIL_PASSWORD", value)

    @property
    def base_url(self):
        return self.retail_url

    @property
    def api_url(self):
        return "%s/api" % self.retail_url

    @property
    def admin_url(self):
        return "%s/admin" % self.retail_url

    @property
    def admin_api_url(self):
        return "%s/api/admin" % self.retail_url

    @property
    def export_url(self):
        return "%s/export" % self.retail_url

    @property
    def export_api_url(self):
        return "%s/api/export" % self.retail_url

    @property
    def home_url(self):
        return "%s/" % self.retail_url

    @property
    def next_url(self):
        return self.home_url

    @property
    def login_url(self):
        return "%s/login" % self.retail_url

    @property
    def logout_url(self):
        return "%s/logout" % self.retail_url
/ripe-rainbow-0.10.3.tar.gz/ripe-rainbow-0.10.3/src/ripe_rainbow/domain/logic/ripe_retail.py
0.613931
0.313512
ripe_retail.py
pypi
import appier

from .. import parts


class RipeCorePart(parts.Part):
    """
    Domain logic part for the RIPE Core service, providing URL
    builders for the API endpoints together with swatch and image
    related assertions/waits.
    """

    def assert_swatch(self, selector, brand, model, material, color):
        """
        Checks that the img element identified by the selector points
        to the correct swatch. The correctness verification is performed
        by checking the `src` attribute of the element.

        This kind of assertion is critical to ensure proper responsiveness
        of the UI in accordance with part selection.

        :type selector: String
        :param selector: The selector for the img.
        :type brand: String
        :param brand: The brand of the swatch.
        :type model: String
        :param model: The model of the swatch.
        :type material: String
        :param material: The material the swatch should represent.
        :type color: String
        :param color: The color being shown in the shown.
        :rtype: bool
        :return: If the assertion was successful or not (propagation).
        """

        img_element = self.waits.visible(selector, ensure=False)
        return self.logic.match_url(
            img_element.get_attribute("src"),
            self.swatch_url,
            params=dict(brand=brand, model=model, material=material, color=color),
        )

    def wait_initials_image(self, selector, model, initials, profile=None):
        # builds the compose query for the initials image, adding the
        # profile entry only when one has been provided
        query = dict(model=model, initials=initials)
        if profile:
            query["initials_profile"] = profile
        return self.wait_image(selector, params=query)

    def wait_image(self, selector, params=None):
        # waits until the element's src points to the compose endpoint
        # with the expected set of query parameters
        return self.waits.has_src(selector, self.compose_url, params=params)

    def order_url(self, number):
        # builds the API URL for the order with the given number
        return "%s/orders/%d" % (self.api_url, number)

    def report_pdf_url(self, number, key=None):
        # builds the PDF report URL for the order, optionally signed
        # with the (URL quoted) secret key
        report_url = "%s/orders/%d/report.pdf" % (self.api_url, number)
        if key:
            report_url += "?key=%s" % appier.util.quote(key)
        return report_url

    @property
    def core_url(self):
        # resolves the base RIPE Core URL, the later configuration
        # variables take precedence over the earlier ones
        suffix = appier.conf("RIPE_SUFFIX", None)
        if suffix:
            url = "https://ripe-core-%s.platforme.com" % suffix
        else:
            url = "http://localhost:8080"
        url = appier.conf("CORE_URL", url)
        url = appier.conf("RIPE_CORE_URL", url)
        return url

    @property
    def username(self):
        value = appier.conf("CORE_USERNAME", "root")
        return appier.conf("RIPE_CORE_USERNAME", value)

    @property
    def password(self):
        value = appier.conf("CORE_PASSWORD", "root")
        return appier.conf("RIPE_CORE_PASSWORD", value)

    @property
    def base_url(self):
        return self.core_url

    @property
    def api_url(self):
        return "%s/api" % self.core_url

    @property
    def admin_url(self):
        return "%s/admin" % self.core_url

    @property
    def admin_api_url(self):
        return "%s/api/admin" % self.core_url

    @property
    def export_url(self):
        return "%s/export" % self.core_url

    @property
    def export_api_url(self):
        return "%s/api/export" % self.core_url

    @property
    def orders_url(self):
        return "%s/orders" % self.api_url

    @property
    def swatch_url(self):
        return "%s/swatch" % self.api_url

    @property
    def compose_url(self):
        return "%s/compose" % self.api_url
/ripe-rainbow-0.10.3.tar.gz/ripe-rainbow-0.10.3/src/ripe_rainbow/domain/logic/ripe_core.py
0.747892
0.309363
ripe_core.py
pypi
import os

import appier

from .. import parts


class InteractionsPart(parts.Part):
    """
    Domain part that centralizes browser interaction primitives
    (navigation, typing, key presses, clicking, file setting and
    highlight helpers), all of them built on top of the waits
    infrastructure for retry-based robustness.
    """

    def goto_url(self, url, redirect_url=None, params=(), fragment="", wait=True):
        """
        Navigates to a certain URL with given GET parameters and to
        the request fragment.

        The operation is always going to be performed using a GET
        request as that's the only available request method from
        the browser.

        By default the redirection is waited to verify that the final
        redirection is performed.

        :type url: String
        :param url: The URL to navigate to, this should represent only
        the base URL without GET parameters and fragment.
        :type redirect_url: String
        :param redirect_url: The target URL of the redirection (if different
        from the target one) to be used in the wait verification.
        :type params: Sequence
        :param params: A sequence of (key, values) tuples representing the GET
        query parameters to be added to the base URL.
        :type fragment: String
        :param fragment: The fragment string to be added to the last part
        of the URL to be built.
        :type wait: bool
        :param wait: If the engine should wait until the browser URL bar
        is set to the destination URL (could pose issues with HTTP redirection).
        :rtype: bool
        :return: The result of the redirection, if it was verified by
        the browser.
        """

        # builds the query string from the (key, values) tuples, quoting
        # both sides so that the resulting URL remains valid; note that
        # the default is an immutable tuple (a mutable list default would
        # be shared across calls)
        params_s = []
        for (key, value) in params:
            key_q = appier.util.quote(key)
            for _value in value:
                value_q = appier.util.quote(_value)
                param = key_q + "=" + value_q
                params_s.append(param)
        params_s = "&".join(params_s)
        if params_s:
            url += "?" + params_s
        if fragment:
            url += "#" + fragment

        self.driver.get(url)
        if not wait:
            return
        return self.waits.redirected_to(redirect_url or url)

    def write_text(self, selector, text):
        """
        Writes the text in the given element, this means having
        it typed like in a physical keyboard.

        :type selector: String
        :param selector: The selector for the element to write the text in.
        :type text: String
        :param text: The text to write "using the keyboard".
        :rtype: Element
        :return: The element with the text changed.
        """

        # waits until the element is visible for the selector and then
        # retrieves the reference to it to be able to write text
        element = self.waits.visible(selector)

        # waits until a valid text change in the element is possible, this
        # overcomes limitations with non interactable elements
        return self.waits.until(
            lambda d: self.driver.safe(self.driver.write_text, element, text),
            "Element '%s' found but never became writable" % selector,
        )

    def press_key(self, selector, key):
        """
        Presses the provided key on a certain element, as if the
        physical keyboard key had been pressed.

        :type selector: String
        :param selector: The selector for the element to focus when
        pressing the key.
        :type key: String
        :param key: The name of the key that is going to be pressed by
        the keyboard, this name is set on an agnostic way.
        :rtype: Element
        :return: The element with the key pressed.
        """

        # waits until the element is visible for the selector and then
        # retrieves the reference to it to be able to press the key
        element = self.waits.visible(selector)

        # waits until a valid key stroke in the element is possible, this
        # overcomes limitations with non interactable elements
        return self.waits.until(
            lambda d: self.driver.safe(self.driver.press_key, element, key),
            "Element '%s' found but never became interactable" % selector,
        )

    def press_enter(self, selector):
        """
        Presses the enter key on a certain element, as if the
        physical enter key had been pressed.

        :type selector: String
        :param selector: The selector for the element to focus when
        pressing enter.
        :rtype: Element
        :return: The element with the enter key pressed.
        """

        # waits until the element is visible for the selector and then
        # retrieves the reference to it to be able to press enter
        element = self.waits.visible(selector)

        # waits until a valid key stroke in the element is possible, this
        # overcomes limitations with non interactable elements
        return self.waits.until(
            lambda d: self.driver.safe(self.driver.press_enter, element),
            "Element '%s' found but never became interactable" % selector,
        )

    def click(self, selector, text=None):
        """
        Clicks an element when possible, which happens when that
        element is both visible and "clickable".

        :type selector: String
        :param selector: The selector for the element to click.
        :type text: String
        :param text: The text the selected element must have before
        being clicked.
        :rtype: Element
        :return: The clicked element if there's any otherwise an
        invalid value.
        """

        # waits until the try click operation is possible meaning that a
        # proper click has been "done" by the driver
        return self.waits.until(
            lambda d: self._click(selector, text=text),
            "Element '%s' found but never became clickable" % selector,
        )

    def set_file(self, selector, path):
        """
        Sets a file to a file input element when possible, which
        happens when that element exists.

        This operation only sets the file association in the target
        element, the upload/send operation should be triggered by the
        containing form element.

        :type selector: String
        :param selector: The selector for the file input element to set
        the file to, the file will only be sent once the form where the
        element is contained is submitted.
        :type path: String
        :param path: The path to the file being sent in the local
        filesystem.
        :rtype: Element
        :return: The file input element if there's any otherwise an
        invalid value.
        """

        # normalizes the path (that can be relative) so that it can
        # be safely used by the underlying element changing operations
        path = os.path.abspath(path)
        path = os.path.normpath(path)

        # waits until the try set operation is possible meaning that
        # the target element exists and the upload was successful
        return self.waits.until(
            lambda d: self._set_file(selector, path),
            "Could not set '%s' to '%s'" % (path, selector),
        )

    def highlight(self, selector, text=None):
        """Highlights the (visible) element for the given selector."""

        # waits until the element is visible for the selector and then
        # retrieves the reference to it to be able to highlight it
        element = self.waits.visible(selector, text=text)

        # waits until the highlight operation is possible for the element
        # that has just been ensured as visible
        return self.waits.until(
            lambda d: self.driver.safe(self.driver.highlight, element),
            "Element '%s' found but was not possible to highlight it" % selector,
        )

    def lowlight(self, selector, text=None):
        """Removes the highlight from the (visible) element for the selector."""

        # waits until the element is visible for the selector and then
        # retrieves the reference to it to be able to lowlight it
        element = self.waits.visible(selector, text=text)

        # waits until the lowlight operation is possible for the element
        # that has just been ensured as visible
        return self.waits.until(
            lambda d: self.driver.safe(self.driver.lowlight, element),
            "Element '%s' found but was not possible to lowlight it" % selector,
        )

    def switch_tab(self, tab):
        """Switches the browser focus to the requested tab."""

        return self.driver.switch_tab(tab)

    def switch_context(self, name="native", index=0):
        """
        Switches the driver to the context with the given name and
        index, waiting first for enough contexts to be available.
        """

        self.waits.until(
            lambda d: self.driver.count_context(name) > index,
            "Expecting the number of contexts to be at least '%d' but is '%d'"
            % (index, self.driver.count_context(name)),
        )
        return self.driver.switch_context(name, dict(index=index))

    def close_tab(self, tab=None):
        """Closes the provided tab (or the current one when not given)."""

        return self.driver.close_tab(tab)

    @property
    def url(self):
        # proxies the URL currently present in the browser's address bar
        return self.driver.current_url

    def _click(self, selector, text=None):
        """
        Inner method that takes the selector and the possible
        text value of a target element and tries to run a
        click operation in it.

        This method is ready to be used within a waits environment
        so that proper repetition may happen.

        :type selector: String
        :param selector: The selector for the element to click.
        :type text: String
        :param text: The text the selected element must have before
        being clicked.
        :rtype: Element
        :return: The clicked element if there's any otherwise an
        invalid value.
        """

        element = self.waits._get_element(
            selector, text=text, displayed=False, visible=False
        )
        if not element:
            return None
        return self.driver.safe(self.driver.click, element)

    def _set_file(self, selector, path):
        """
        Inner method that tries to set the file given by path to the
        file input element defined by the selector.

        This method is ready to be used within a waits environment
        so that proper repetition may happen.

        :type selector: String
        :param selector: The selector for the file input element to
        set the file to.
        :type path: String
        :param path: The local filesystem path to the file being sent.
        :rtype: Element
        :return: The file input element if there's any otherwise an
        invalid value.
        """

        element = self.logic.get(selector)
        if not element:
            return None
        return self.driver.safe(self.driver.write_text, element, path, False)
/ripe-rainbow-0.10.3.tar.gz/ripe-rainbow-0.10.3/src/ripe_rainbow/domain/base/interactions.py
0.680985
0.394493
interactions.py
pypi
import unittest

import appier

import ripe_rainbow


class LogicPartTest(unittest.TestCase):
    """Validates the URL matching behaviour of the ``LogicPart`` domain part."""

    def _run_match_cases(self, cases):
        # creates a fresh logic part (exactly as each original test did)
        # and replays every (url, reference, kwargs, expected) case in
        # order, asserting the expected match result for each of them
        test_case = ripe_rainbow.TestCase()
        logic_part = ripe_rainbow.LogicPart(test_case)
        for url, url_reference, kwargs, expected in cases:
            self.assertEqual(
                logic_part.match_url(url, url_reference, **kwargs), expected
            )

    def test_match_url(self):
        base = "http://www.platforme.com"
        self._run_match_cases(
            (
                (base, base, {}, True),
                (base, "http://www.platforme.come", {}, False),
                (base, "http://www.platforme.co", {}, False),
                (base, base + "?param1=value1", {}, True),
                (base + "?param1=value1", base, {}, True),
                (
                    base + "?param1=value1",
                    base,
                    dict(params=dict(param1="value1")),
                    True,
                ),
                (
                    base + "?param1=value1&param2=value2",
                    base,
                    dict(params=dict(param1="value1")),
                    True,
                ),
                (
                    base + "?param1=value1&param1=value2",
                    base,
                    dict(params=dict(param1=["value1", "value2"])),
                    True,
                ),
                (
                    base + "?param1=value1&param1=value2",
                    base,
                    dict(params=dict(param1=["value2", "value1"])),
                    True,
                ),
                (
                    base + "?param1=value1&param1=value2",
                    base,
                    dict(params=dict(param1=["value1", "value2", "value3"])),
                    False,
                ),
            )
        )

    def test_match_url_strict(self):
        base = "http://www.platforme.com"
        self._run_match_cases(
            (
                (
                    base + "?param1=value1&param2=value2",
                    base,
                    dict(params=dict(param1="value1"), strict=True),
                    False,
                ),
                (
                    base + "?param1=value1&param2=value2",
                    base,
                    dict(params=dict(param1="value1", param2="value2"), strict=True),
                    True,
                ),
                (
                    base + "?param1=value1&param2=value2#anchor1",
                    base,
                    dict(
                        params=dict(param1="value1", param2="value2"),
                        fragment="anchor1",
                        strict=True,
                    ),
                    True,
                ),
                (
                    base + "?param1=value1&param1=value2#anchor1",
                    base,
                    dict(
                        params=dict(param1=["value1", "value2"]),
                        fragment="anchor1",
                        strict=True,
                    ),
                    True,
                ),
                (
                    base + "?param1=value1&param1=value2#anchor1",
                    base,
                    dict(
                        params=dict(param1=["value2", "value1"]),
                        fragment="anchor1",
                        strict=True,
                    ),
                    False,
                ),
            )
        )

    def test_match_url_bytes(self):
        base = "http://www.platforme.com"
        to_bytes = appier.legacy.bytes
        double = base + "?param1=value1&param1=value2#anchor1"
        simple = base + "?param1=value1&param2=value2"
        reversed_kwargs = dict(
            params=dict(param1=["value2", "value1"]),
            fragment="anchor1",
            strict=True,
        )
        simple_kwargs = dict(
            params=dict(param1="value1", param2="value2"), strict=True
        )
        self._run_match_cases(
            (
                (to_bytes(double), base, reversed_kwargs, False),
                (double, to_bytes(base), reversed_kwargs, False),
                (to_bytes(double), to_bytes(base), reversed_kwargs, False),
                (to_bytes(simple), base, simple_kwargs, True),
                (simple, to_bytes(base), simple_kwargs, True),
                (to_bytes(simple), to_bytes(base), simple_kwargs, True),
            )
        )
/ripe-rainbow-0.10.3.tar.gz/ripe-rainbow-0.10.3/src/ripe_rainbow/unit/domain/base/logic.py
0.502686
0.566738
logic.py
pypi
class AtlasSource(object):
    """
    Class responsible for creating an Atlas source object that
    holds information about the number of probes, the type of
    the source and the value of the source. This object can be
    passed as source argument later on when we call AtlasRequest.
    Usage:
        from ripe.atlas.cousteau import AtlasSource
        source = AtlasSource(**{"type": "area", "value": "WW", "requested": 5})
    """

    # source types accepted by the Atlas API
    types_available = ["area", "country", "prefix", "asn", "probes", "msm"]

    def __init__(self, **kwargs):
        # route every provided value through the corresponding property
        # setter (so that it gets validated) and seed the private
        # attribute with None otherwise, which allows clean() to spot
        # missing required fields later on
        if "requested" in kwargs:
            self.requested = kwargs["requested"]
        else:
            self._requested = None

        if "value" in kwargs:
            self.value = kwargs["value"]
        else:
            self._value = None

        if "type" in kwargs:
            self.type = kwargs["type"]
        else:
            self._type = None

        if "tags" in kwargs:
            self.tags = kwargs["tags"]
        else:
            self._tags = None

    # requested attribute
    def get_requested(self):
        """Getter for requested attribute"""
        return self._requested

    def set_requested(self, value):
        """Setter for requested attribute"""
        self._requested = value

    doc_req = "Defines how many probes will be requested."
    requested = property(get_requested, set_requested, doc=doc_req)

    # value attribute
    def get_value(self):
        """Getter for value attribute"""
        return self._value

    def set_value(self, value):
        """Setter for value attribute"""
        self._value = value

    doc_value = "Defines the value of the type of probe's source."
    value = property(get_value, set_value, doc=doc_value)

    # type attribute
    def get_type(self):
        """Getter for type attribute"""
        return self._type

    def set_type(self, value):
        """Setter for type attribute, validated against types_available."""
        if value not in self.types_available:
            log = "Sources field 'type' should be in one of %s" % (
                self.types_available
            )
            raise MalFormattedSource(log)
        self._type = value

    doc_type = "Defines the type of probe's source."
    type = property(get_type, set_type, doc=doc_type)

    # tags attribute
    def get_tags(self):
        """Getter for tags attribute"""
        return self._tags

    def set_tags(self, value):
        """
        Setter for tags attribute, enforcing the expected structure:
        a dict with (only) "include"/"exclude" keys, each mapping to
        a list of string tags.
        """
        log = (
            'Sources fields "tags" should be a dict in the format '
            '{"include": [ "tag1", "tag2", "tagN" ],'
            '"exclude": [ "tag1", "tag2", "tagN" ] }'
        )
        if not isinstance(value, dict):
            raise MalFormattedSource(log)
        if not set(value.keys()).issubset(set(["include", "exclude"])):
            raise MalFormattedSource(log)
        for tag_list in value.values():
            if not isinstance(tag_list, list):
                raise MalFormattedSource(log)
            if [tag for tag in tag_list if not isinstance(tag, str)]:
                raise MalFormattedSource(log)
        self._tags = value

    doc_tags = "Defines optional tags to filter probes."
    tags = property(get_tags, set_tags, doc=doc_tags)

    def clean(self):
        """
        Cleans/checks user has entered all required attributes. This
        might save some queries from being sent to server if they are
        totally wrong.
        """
        if not all([self._requested, self._value, self._type]):
            raise MalFormattedSource(
                "<requested, value, type> fields are required."
            )

    def build_api_struct(self):
        """
        Calls the clean method of the class and returns the info in a
        structure that Atlas API is accepting.
        """
        self.clean()
        r = {
            "type": self._type,
            "requested": self._requested,
            "value": self._value
        }
        # tags are optional and thus only serialized when set
        if self._tags:
            r["tags"] = self._tags

        return r


class AtlasChangeSource(AtlasSource):
    """
    Class responsible for creating an Atlas source object for
    changing participants probes for a measurement.
    Usage:
        from ripe.atlas.cousteau import AtlasChangeSource
        source = AtlasChangeSource(**{"type": "probes", "value": "1,2", "requested": 2, "action": "remove"})
    """

    def __init__(self, **kwargs):
        # action must be resolved before the parent constructor runs,
        # since the type/tags setters below depend on its value
        if "action" in kwargs:
            self.action = kwargs["action"]
        else:
            self._action = None
        super(AtlasChangeSource, self).__init__(**kwargs)

    # type attribute
    def get_type(self):
        """Getter for type attribute"""
        return self._type

    def set_type(self, value):
        """Setter for type attribute; "remove" actions only accept "probes"."""
        if self.action == "remove" and value != "probes":
            log = "Sources field 'type' when action is remove should always be 'probes'."
            raise MalFormattedSource(log)
        self._type = value

    doc_type = "Defines the type of probe's source."
    type = property(get_type, set_type, doc=doc_type)

    # tags attribute
    def get_tags(self):
        """Getter for tags attribute"""
        return self._tags

    def set_tags(self, value):
        """Setter for tags attribute; tags are only valid when adding probes."""
        if self.action == "remove":
            log = (
                "Tag-based filtering can only be used when adding "
                "participant probes for a measurement."
            )
            raise MalFormattedSource(log)
        super(AtlasChangeSource, self).set_tags(value)

    doc_tags = "Defines optional tags to filter probes."
    tags = property(get_tags, set_tags, doc=doc_tags)

    # action attribute
    def get_action(self):
        """Getter for action attribute"""
        return self._action

    def set_action(self, value):
        """Setter for action attribute, restricted to "remove"/"add"."""
        if value not in ("remove", "add"):
            log = "Sources field 'action' should be 'remove' or 'add'."
            raise MalFormattedSource(log)
        self._action = value

    doc_action = "Defines the action (remove/add if the change source)."
    action = property(get_action, set_action, doc=doc_action)

    def clean(self):
        """
        Cleans/checks user has entered all required attributes. This
        might save some queries from being sent to server if they are
        totally wrong.
        """
        if not all([self._type, self._requested, self._value, self._action]):
            raise MalFormattedSource(
                "<type, requested, value, action> fields are required."
            )

    def build_api_struct(self):
        """
        Calls parent's method and just adds the additional field
        'action', that is required to form the structure that Atlas
        API is accepting.
        """
        data = super(AtlasChangeSource, self).build_api_struct()
        data.update({"action": self._action})
        return data


class MalFormattedSource(Exception):
    """Custom Exception class for malformed sources"""
    pass


# MalFormattedSource is part of the public contract (callers need to
# be able to catch it) so it is exported alongside the source classes
__all__ = ["AtlasSource", "AtlasChangeSource", "MalFormattedSource"]
/ripe.atlas.cousteau-2.0.0-py3-none-any.whl/ripe/atlas/cousteau/source.py
0.760028
0.294684
source.py
pypi
class AtlasMeasurement(object):
    """
    Parent class for creating an Atlas measurement object
    containing all needed options for ATLAS API. The different
    kind of measurements are specified as child classes. These
    objects can be passed as measurement arguments later on when
    we call AtlasRequest.
    To use this class directly a "type" must be provided:
    Usage:
        from ripe.atlas.cousteau import AtlasMeasurement
        msm = AtlasMeasurement(**{
            "type": "ping",
            "target": "www.google.gr",
            "af": 4,
            "description": "testing AtlasMeasurement",
        })
    """

    def __init__(self, **kwargs):
        # set to store all options that are used
        self.used_options = set()
        # required options for definitions part
        self.required_options = ["description", "af"]
        self.measurement_type = kwargs.get("type", "")
        self._init(**kwargs)

    def _init(self, **kwargs):
        """
        Initializing required options and set them as attributes as
        well as options coming from user.
        """
        self._init_required_options(**kwargs)
        self.add_option(**kwargs)

    def _store_option(self, option):
        """
        Store option in the used option set. This way we can keep
        track which options user has select to add to instance. This
        set is used at the build_api_struct function when we build
        the desired data structure from user's input.
        """
        self.used_options.add(option)

    def add_option(self, **options):
        """
        Adds an option and its value to the class as an attribute and
        stores it to the used options set.
        """
        for option, value in options.items():
            setattr(self, option, value)
            self._store_option(option)

    def _init_required_options(self, **kwargs):
        """
        Initialize the required option as class members. The value
        will be either None or the specified value in the kwargs or
        __init__. The logic here is to make the required options
        accessible to edit after a class instance has been created.
        """
        for field in self.required_options:
            setattr(self, field, kwargs.get(field))
            self._store_option(field)

    def clean(self):
        """
        Cleans/checks user entered data making sure required options
        are at least present. This might save some queries from being
        sent if they are totally wrong.

        :raises MalFormattedMeasurement: when the measurement type is
        missing or any required option is still unset.
        """
        # make sure the correct measurement type is set.
        if not self.measurement_type:
            log = "Please define a valid measurement type."
            raise MalFormattedMeasurement(log)

        # make sure the required fields are set.
        for roption in self.required_options:
            if getattr(self, roption, None) is None:
                log = "%s Measurement field: <%s> is required" % (
                    self.__class__.__name__, roption
                )
                raise MalFormattedMeasurement(log)

    def v2_translator(self, option):
        """
        This is a temporary function that helps move from v1 API to
        v2 without breaking already running script and keep backwards
        compatibility. Translates option name from API v1 to renamed
        one of v2 API.
        """
        new_option = option
        new_value = getattr(self, option)

        # mapping of deprecated v1 option names to their v2 equivalents
        renaming_pairs = {
            "dontfrag": "dont_fragment",
            "maxhops": "max_hops",
            "firsthop": "first_hop",
            "use_NSID": "set_nsid_bit",
            "cd": "set_cd_bit",
            "do": "set_do_bit",
            "qbuf": "include_qbuf",
            "recursion_desired": "set_rd_bit",
            "noabuf": "include_abuf"
        }

        if option in renaming_pairs:
            warninglog = (
                "DeprecationWarning: {0} option has been deprecated and "
                "renamed to {1}."
            ).format(option, renaming_pairs[option])
            print(warninglog)
            new_option = renaming_pairs[option]

        # noabuf was changed to include_abuf so we need a double-negative
        if option == "noabuf":
            new_value = not new_value

        return new_option, new_value

    def build_api_struct(self):
        """
        Calls the clean method of the class and returns the info in a
        structure that Atlas API is accepting.
        """
        self.clean()
        data = {"type": self.measurement_type}

        # add all options, translating deprecated v1 names on the fly
        for option in self.used_options:
            option_key, option_value = self.v2_translator(option)
            data.update({option_key: option_value})

        return data


class Ping(AtlasMeasurement):
    """Class for creating a ping measurement
    Usage:
        from ripe.atlas.cousteau import Ping
        ping = Ping(**{
            "target": "www.google.gr",
            "af": 4,
            "description": "testing new wrapper"
        })
    """

    def __init__(self, **kwargs):
        super(Ping, self).__init__(**kwargs)
        self.measurement_type = "ping"
        self.required_options.extend(["target"])
        # re-run _init so the newly required options are registered
        self._init(**kwargs)


class Traceroute(AtlasMeasurement):
    """Class for creating a traceroute measurement"""

    def __init__(self, **kwargs):
        super(Traceroute, self).__init__(**kwargs)
        self.measurement_type = "traceroute"
        self.required_options.extend(["target", "protocol"])
        # re-run _init so the newly required options are registered
        self._init(**kwargs)


class Dns(AtlasMeasurement):
    """Class for creating a DNS measurement"""

    def __init__(self, **kwargs):
        super(Dns, self).__init__(**kwargs)
        self.measurement_type = "dns"
        self.required_options.extend(
            ["query_class", "query_type", "query_argument"]
        )
        # re-run _init so the newly required options are registered
        self._init(**kwargs)


class Sslcert(AtlasMeasurement):
    """Class for creating an SSL certificate measurement"""

    def __init__(self, **kwargs):
        super(Sslcert, self).__init__(**kwargs)
        self.measurement_type = "sslcert"
        self.required_options.extend(["target"])
        # re-run _init so the newly required options are registered
        self._init(**kwargs)


class Ntp(AtlasMeasurement):
    """Class for creating an NTP measurement"""

    def __init__(self, **kwargs):
        super(Ntp, self).__init__(**kwargs)
        self.measurement_type = "ntp"
        self.required_options.extend(["target"])
        # re-run _init so the newly required options are registered
        self._init(**kwargs)


class Http(AtlasMeasurement):
    """Class for creating an HTTP measurement"""

    def __init__(self, **kwargs):
        super(Http, self).__init__(**kwargs)
        self.measurement_type = "http"
        self.required_options.extend(["target"])
        # re-run _init so the newly required options are registered
        self._init(**kwargs)


class MalFormattedMeasurement(Exception):
    """Raised when a measurement misses its type or a required option."""
    pass


# AtlasMeasurement is documented for direct use and
# MalFormattedMeasurement is the error callers must catch, so both
# are exported alongside the concrete measurement classes
__all__ = [
    "AtlasMeasurement",
    "Ping",
    "Traceroute",
    "Dns",
    "Sslcert",
    "Ntp",
    "Http",
    "MalFormattedMeasurement",
]
/ripe.atlas.cousteau-2.0.0-py3-none-any.whl/ripe/atlas/cousteau/measurement.py
0.857156
0.419529
measurement.py
pypi
import calendar

import requests
from dateutil import parser
from datetime import datetime

from .version import __version__


class AtlasRequest(object):
    """
    Base class for doing Atlas requests. Contains functions that can be
    used by most Atlas requests.
    """

    # Map of supported HTTP verbs to the requests-library callables.
    http_methods = {
        "GET": requests.get,
        "POST": requests.post,
        "DELETE": requests.delete
    }

    def __init__(self, **kwargs):
        self.url = ""
        self.key = kwargs.get("key")
        self.url_path = kwargs.get("url_path", "")
        self.server = kwargs.get("server") or "atlas.ripe.net"
        self.verify = kwargs.get("verify", True)
        self.proxies = kwargs.get("proxies", {})
        self.headers = kwargs.get("headers", None)

        default_user_agent = "RIPE ATLAS Cousteau v{0}".format(__version__)
        self.http_agent = kwargs.get("user_agent") or default_user_agent

        # Keyword arguments passed to every requests call made through
        # this object; url query params accumulate into "params".
        self.http_method_args = {
            "params": {},
            "headers": self.get_headers(),
            "verify": self.verify,
            "proxies": self.proxies
        }

        self.post_data = {}

    def get_headers(self):
        """Return header for the HTTP request."""
        headers = {
            "User-Agent": self.http_agent,
            "Content-Type": "application/json",
            "Accept": "application/json"
        }
        if self.key:
            headers["Authorization"] = f"Key {self.key}"
        if self.headers:
            # User-supplied headers override the defaults above.
            headers.update(self.headers)
        return headers

    def http_method(self, method):
        """
        Execute the given HTTP method and returns if it's success or not
        and the response as a string if not success and as python object after
        unjson if it's success.
        """
        self.build_url()

        try:
            response = self.get_http_method(method)
            is_success = response.ok
            try:
                response_message = response.json()
            except ValueError:
                # The body wasn't valid JSON; fall back to the raw text.
                response_message = response.text
        except requests.exceptions.RequestException as exc:
            is_success = False
            response_message = exc.args

        return is_success, response_message

    def get_http_method(self, method):
        """Gets the http method that will be called from the requests library"""
        return self.http_methods[method](self.url, **self.http_method_args)

    def build_url(self):
        """
        Builds the request's url combining server and url_path classes
        attributes.
        """
        self.url = "https://{0}{1}".format(self.server, self.url_path)

    def get(self, **url_params):
        """
        Makes the HTTP GET to the url.
        """
        if url_params:
            self.http_method_args["params"].update(url_params)
        return self.http_method("GET")

    def post(self):
        """
        Makes the HTTP POST to the url sending post_data.
        """
        self._construct_post_data()
        post_args = {"json": self.post_data}
        self.http_method_args.update(post_args)
        return self.http_method("POST")

    def _construct_post_data(self):
        # Subclasses that POST must populate self.post_data here.
        raise NotImplementedError

    def clean_time(self, time):
        """
        Transform time field to datetime object if there is any.

        Accepts a Unix timestamp (int), an ISO-ish string (parsed by
        dateutil), or anything else (returned unchanged, e.g. None or an
        already-built datetime).
        """
        if isinstance(time, int):
            time = datetime.utcfromtimestamp(time)
        elif isinstance(time, str):
            time = parser.parse(time)
        return time


class AtlasCreateRequest(AtlasRequest):
    """
    Class responsible for creating a request for creating a new Atlas
    measurement. Takes as arguments Atlas API key, a list of Atlas measurement
    objects and a list of Atlas sources. Optionally the start and end time and
    whether the measurement is a oneoff can be specified.
    Usage:
        from ripe.atlas import AtlasCreateRequest
        ar = AtlasCreateRequest(**{
            "start_time": start,
            "stop_time": stop,
            "key": "path_to_key",
            "measurements":[measurement1, ...],
            "sources": [source1, ...],
            "is_oneoff": True/False
        })
        ar.create()
    """

    def __init__(self, **kwargs):
        super(AtlasCreateRequest, self).__init__(**kwargs)

        self.url_path = '/api/v2/measurements/'

        self.measurements = kwargs["measurements"]
        self.sources = kwargs["sources"]
        self.start_time = self.clean_time(kwargs.get("start_time"))
        self.stop_time = self.clean_time(kwargs.get("stop_time"))
        self.bill_to = kwargs.get("bill_to")
        if kwargs.get("is_oneoff"):
            self.is_oneoff = kwargs["is_oneoff"]
        else:
            self.is_oneoff = False

    def _construct_post_data(self):
        """
        Constructs the data structure that is required from the atlas API based
        on measurements, sources and times user has specified.
        """
        definitions = [msm.build_api_struct() for msm in self.measurements]
        probes = [source.build_api_struct() for source in self.sources]
        # is_oneoff is always sent (it defaults to False in __init__), so
        # no separate conditional update is needed for it.
        self.post_data = {
            "definitions": definitions,
            "probes": probes,
            "is_oneoff": self.is_oneoff
        }
        if self.start_time:
            self.post_data.update(
                {"start_time": int(calendar.timegm(self.start_time.timetuple()))}
            )
        if self.stop_time:
            self.post_data.update(
                {"stop_time": int(calendar.timegm(self.stop_time.timetuple()))}
            )
        if self.bill_to:
            self.post_data.update({"bill_to": self.bill_to})

    def create(self):
        """Sends the POST request"""
        return self.post()


class AtlasChangeRequest(AtlasRequest):
    """Atlas request for changing probes for a running measurement.
    post_data = [{
        "action": "add|remove",
        "requested": probe_number,
        # when action=remove only probes is supported
        "type": "area|country|asn|prefix|msm|probes",
        "value": probe_values
    }]
    """

    def __init__(self, **kwargs):
        super(AtlasChangeRequest, self).__init__(**kwargs)
        self.url_path = '/api/v2/measurements/{0}/participation-requests/'

        self.msm_id = kwargs["msm_id"]
        self.sources = kwargs["sources"]
        self.url_path = self.url_path.format(self.msm_id)

    def _construct_post_data(self):
        """
        Constructs the data structure that is required from the atlas API based
        on measurement id, and the sources.
        """
        self.post_data = [source.build_api_struct() for source in self.sources]

    def create(self):
        """Sends the POST request"""
        return self.post()


class AtlasStopRequest(AtlasRequest):
    """Atlas request for stopping a measurement."""

    def __init__(self, **kwargs):
        super(AtlasStopRequest, self).__init__(**kwargs)

        self.url_path = '/api/v2/measurements/'
        self.msm_id = kwargs["msm_id"]
        self.url_path = "{0}{1}".format(self.url_path, self.msm_id)

    def delete(self):
        """
        Makes the HTTP DELETE to the url.
        """
        return self.http_method("DELETE")

    def create(self):
        """Sends the DELETE request"""
        return self.delete()


class AtlasLatestRequest(AtlasRequest):
    """Atlas request for fetching the latest results of a measurement."""

    def __init__(self, msm_id, probe_ids=(), **kwargs):
        super(AtlasLatestRequest, self).__init__(**kwargs)
        self.url_path = "/api/v2/measurements/{0}/latest"

        self.msm_id = msm_id
        self.probe_ids = None
        self.url_path = self.url_path.format(self.msm_id)

        if probe_ids:
            self.add_probe_parameters(probe_ids)

    def add_probe_parameters(self, probe_ids):
        """
        Creates string format if needed and add probe
        ids to HTTP query parameters.
        """
        if isinstance(probe_ids, (tuple, list)):  # tuples & lists > x,y,z
            self.probe_ids = ",".join([str(_) for _ in probe_ids])
        else:
            self.probe_ids = probe_ids

        additional_params = {
            "probe_ids": self.probe_ids
        }

        self.http_method_args["params"].update(additional_params)

    def create(self):
        """Sends the GET request."""
        return self.get()


class AtlasResultsRequest(AtlasRequest):
    """Atlas request for fetching results of a measurement."""

    def __init__(self, **kwargs):
        super(AtlasResultsRequest, self).__init__(**kwargs)
        self.url_path = '/api/v2/measurements/{0}/results'

        self.msm_id = kwargs["msm_id"]
        self.start = self.clean_time(kwargs.get("start"))
        self.stop = self.clean_time(kwargs.get("stop"))
        self.probe_ids = self.clean_probes(kwargs.get("probe_ids"))

        self.url_path = self.url_path.format(self.msm_id)

        self.update_http_method_params()

    def clean_probes(self, probe_ids):
        """
        Checks format of probe ids and transform it to something API
        understands.
        """
        if isinstance(probe_ids, (tuple, list)):  # tuples & lists > x,y,z
            probe_ids = ",".join([str(_) for _ in probe_ids])

        return probe_ids

    def update_http_method_params(self):
        """
        Update HTTP url parameters based on msm_id and query filters if
        there are any.
        """
        url_params = {}

        if self.start:
            url_params.update(
                {"start": int(calendar.timegm(self.start.timetuple()))}
            )

        if self.stop:
            url_params.update(
                {"stop": int(calendar.timegm(self.stop.timetuple()))}
            )

        if self.probe_ids:
            url_params.update({"probe_ids": self.probe_ids})

        self.http_method_args["params"].update(url_params)

    def create(self):
        """Sends the GET request."""
        return self.get()


# Fix: AtlasLatestRequest is public API but was missing from __all__.
__all__ = [
    "AtlasStopRequest",
    "AtlasCreateRequest",
    "AtlasChangeRequest",
    "AtlasRequest",
    "AtlasLatestRequest",
    "AtlasResultsRequest"
]
/ripe.atlas.cousteau-2.0.0-py3-none-any.whl/ripe/atlas/cousteau/request.py
0.728265
0.156652
request.py
pypi
Changelog ========= * 1.3.0 * abuf.py: error handling for NS records, extended rcode, cookies and client subnets * 1.2.2 * Catch problems parsing SSL certificates * 1.2.1 * Add support for non-DNS names in subjectAltName extensions * 1.2 * Replaced pyOpenSSL with cryptography * Added parsing of subjectAltName X509 extension * 1.1.11 * Added first version of WiFi results * 1.1.10 * Added a `parse_all_hops` kwarg to the Traceroute class to tell Sagan to stop parsing Hops and Packets once we have all of the last hop statistics (default=True) * Remove dependency on IPy: we were using it for IPv6 canonicalization, but all IPv6 addresses in results should be in canonical form to start with. * 1.1.9 * Removed the `parse_abuf` script because no one was using it and its Python3 support was suspect anyway. * 1.1.8 * Handle case where a traceroute result might not have ``dst_addr`` field. * 1.1.7 * Change condition of traceroute's ``last_hop_responded`` flag. * Add couple of more traceroute's properties. ``is_success`` and ``last_hop_errors``. * Add tests to the package itself. * 1.1.6 * Fix for `Issue #56`_ a case where the ``qbuf`` value wasn't being properly captured. * Fixed small bug that didn't accurately capture the ``DO`` property from the qbuf. * 1.1.5 * We now ignore so-called "late" packets in traceroute results. This will likely be amended later as future probe firmwares are expected to make better use of this value, but until then, Sagan will treat these packets as invalid. * 1.1.4 * Added a ``type`` attribute to all ``Result`` subclasses * Added support for a lot of new DNS answer types, including ``NSEC``, ``PTR``, ``SRV``, and more. These answers do not yet have a complete string representation however. * 1.1.3 * Changed the name of ``TracerouteResult.rtt_median`` to ``TracerouteResult.last_rtt_median``. * Modified the ``DnsResult`` class to allow the "bubbling up" of error statuses. 
* 1.1.2 * We skipped this number for some reason :-/ * 1.1.1 * Fixed a `string representation bug`_ found by `iortiz`_ * 1.1.0 * **Breaking Change**: the ``Authority`` and ``Additional`` classes were removed, replaced with the appropriate answer types. For the most part, this change should be invisible, as the common properties are the same, but if you were testing code against these class types, you should consider this a breaking change. * **Breaking Change**: The ``__str__`` format for DNS ``RrsigAnswer`` was changed to conform to the output of a typical ``dig`` binary. * Added ``__str__`` definitions to DNS answer classes for use with the toolkit. * In an effort to make Sagan (along with Cousteau and the toolkit) more portable, we dropped the requirement for the ``arrow`` package. * 1.0.0 * 1.0! w00t! * **Breaking Change**: the ``data`` property of the ``TxtAnswer`` class was changed from a string to a list of strings. This is a correction from our own past deviation from the RFC, so we thought it best to conform as part of the move to 1.0.0 * Fixed a bug where non-ascii characters in DNS TXT answers resulted in an exception. * 0.8.2 * Fixed a bug related to non-ascii characters in SSL certificate data. * Added a wrapper for json loaders to handle differences between ujson and the default json module. * 0.8.1 * Minor fix to make all ``Result`` objects properly JSON serialisable. * 0.8.0 * Added `iortiz`_'s patch for ``flags`` and ``sections`` properties on DNS ``Answer`` objects. * 0.7.1 * Changed ``README.md`` to ``README.rst`` to play nice with pypi. * 0.7 * Added `pierky`_'s new ``RRSigAnswer`` class to the dns parser.
* 0.6.3 * Fixed a bug in how Sagan deals with inappropriate firmware versions * 0.6.2 * Added `pierky`_'s fix for AD and CD flags parsing in DNS Header * 0.6.1 * Added ``rtt_min``, ``rtt_max``, ``offset_min``, and ``offset_max`` to ``NTPResult`` * 0.6.0 * Support for NTP measurements * Fixes for how we calculate median values * Smarter setup.py * 0.5.0 * Complete Python3 support! * 0.4.0 * Added better Python3 support. Tests all pass now for ping, traceroute, ssl, and http measurements. * Modified traceroute results to make use of ``destination_ip_responded`` and ``last_hop_responded``, deprecating ``target_responded``. See the docs for details. * 0.3.0 * Added support for making use of some of the pre-calculated values in DNS measurements so you don't have to parse the abuf if you don't need it. * Fixed a bug in the abuf parser where a variable was being referenced but never defined. * Cleaned up some of the abuf parser to better conform to pep8. * 0.2.8 * Fixed a bug where DNS ``TXT`` results with class ``IN`` were missing a ``.data`` value. * Fixed a problem in the SSL unit tests where ``\n`` was being misinterpreted. * 0.2.7 * Made abuf more robust in dealing with truncation. * 0.2.6 * Replaced ``SslResult.get_checksum_chain()`` with the ``SslResult.checksum_chain`` property. * Added support for catching results with an ``err`` property as an actual error. * 0.2.5 * Fixed a bug in how the ``on_error`` and ``on_malformation`` preferences weren't being passed down into the subcomponents of the results. * 0.2.4 * Support for ``seconds_since_sync`` across all measurement types * 0.2.3 * "Treat a missing Type value in a DNS result as a malformation" (Issue #36) * 0.2.2 * Minor bugfixes * 0.2.1 * Added a ``median_rtt`` value to traceroute ``Hop`` objects. * Smarter and more consistent error handling in traceroute and HTTP results. * Added an ``error_message`` property to all objects that is set to ``None`` by default.
* 0.2.0 * Totally reworked error and malformation handling. We now differentiate between a result (or portion thereof) being malformed (and therefore unparsable) and simply containing an error such as a timeout. Look for an ``is_error`` property or an ``is_malformed`` property on every object to check for it, or simply pass ``on_malformation=Result.ACTION_FAIL`` if you'd prefer things to explode with an exception. See the documentation for more details * Added lazy-loading features for parsing abuf and qbuf values out of DNS results. * Removed the deprecated properties from ``dns.Response``. You must now access values like ``edns0`` from ``dns.Response.abuf.edns0``. * More edge cases have been found and accommodated. * 0.1.15 * Added a bunch of abuf parsing features from `b4ldr`_ with some help from `phicoh`_. * 0.1.14 * Fixed the deprecation warnings in ``DnsResult`` to point to the right place. * 0.1.13 * Better handling of ``DNSResult`` errors * Rearranged the way abufs were handled in the ``DnsResult`` class to make way for ``qbuf`` values as well. The old method of accessing ``header``, ``answers``, ``questions``, etc is still available via ``Response``, but this will go away when we move to 0.2. Deprecation warnings are in place. * 0.1.12 * Smarter code for checking whether the target was reached in ``TracerouteResults``. * We now handle the ``destination_option_size`` and ``hop_by_hop_option_size`` values in ``TracerouteResult``. * Extended support for ICMP header info in traceroute ``Hop`` class by introducing a new ``IcmpHeader`` class. * 0.1.8 * Broader support for SSL checksums. We now make use of ``md5`` and ``sha1``, as well as the original ``sha256``. .. _Issue #56: https://github.com/RIPE-NCC/ripe.atlas.sagan/issues/56 .. _string representation bug: https://github.com/RIPE-NCC/ripe-atlas-tools/issues/1 .. _b4ldr: https://github.com/b4ldr .. _phicoh: https://github.com/phicoh .. _iortiz: https://github.com/iortiz .. _pierky: https://github.com/pierky
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/CHANGES.rst
0.816553
0.680142
CHANGES.rst
pypi
from datetime import datetime

from dateutil.relativedelta import relativedelta
from pytz import UTC

from .base import Result, ResultParseError, ParsingDict


class Packet(ParsingDict):
    """
    Model for data structure of each packet for a NTP result.
    """

    # NTP timestamps count seconds from this epoch, not the Unix epoch.
    NTP_EPOCH = datetime(1900, 1, 1, tzinfo=UTC)

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.rtt = None
        self.offset = None

        # Initialise the timestamps and their lazy-conversion caches before
        # the early return below, so the *_time properties are safe to access
        # even for packets that carry no "rtt" key (e.g. timeouts).
        # Previously those attributes were only set on the success path, and
        # accessing e.g. .final_time on a timeout packet raised
        # AttributeError instead of returning None.
        self.final_timestamp = None
        self.origin_timestamp = None
        self.received_timestamp = None
        self.transmitted_timestamp = None
        self._final_time = None
        self._origin_time = None
        self._received_time = None
        self._transmitted_time = None

        if "rtt" not in data:
            return

        try:
            self.rtt = round(float(data["rtt"]), 3)
        except (ValueError, TypeError):
            raise ResultParseError(
                'RTT "{rtt}" does not appear to be a float'.format(
                    rtt=data["rtt"])
            )

        self.offset = self.ensure("offset", float)
        self.final_timestamp = self.ensure("final-ts", float)
        self.origin_timestamp = self.ensure("origin-ts", float)
        self.received_timestamp = self.ensure("receive-ts", float)
        self.transmitted_timestamp = self.ensure("transmit-ts", float)

    def __str__(self):
        return "{rtt}|{offset}".format(rtt=self.rtt, offset=self.offset)

    @property
    def final_time(self):
        """Final timestamp as a datetime, converted lazily and cached."""
        if not self._final_time and self.final_timestamp:
            self._final_time = self.NTP_EPOCH + relativedelta(
                seconds=self.final_timestamp)
        return self._final_time

    @property
    def origin_time(self):
        """Origin timestamp as a datetime, converted lazily and cached."""
        if not self._origin_time and self.origin_timestamp:
            self._origin_time = self.NTP_EPOCH + relativedelta(
                seconds=self.origin_timestamp)
        return self._origin_time

    @property
    def received_time(self):
        """Receive timestamp as a datetime, converted lazily and cached."""
        if not self._received_time and self.received_timestamp:
            self._received_time = self.NTP_EPOCH + relativedelta(
                seconds=self.received_timestamp)
        return self._received_time

    @property
    def transmitted_time(self):
        """Transmit timestamp as a datetime, converted lazily and cached."""
        if not self._transmitted_time and self.transmitted_timestamp:
            self._transmitted_time = self.NTP_EPOCH + relativedelta(
                seconds=self.transmitted_timestamp)
        return self._transmitted_time


class NtpResult(Result):
    """
    Subclass to cover ntp type measurement results.
    """

    def __init__(self, data, **kwargs):

        Result.__init__(self, data, **kwargs)

        self.rtt_median = None
        self.rtt_min = None
        self.rtt_max = None
        self.offset_median = None
        self.offset_min = None
        self.offset_max = None

        self.af = self.ensure("af", int)
        self.protocol = self.ensure("proto", str)
        self.destination_address = self.ensure("dst_addr", str)
        self.destination_name = self.ensure("dst_name", str)
        self.source_address = self.ensure("src_addr", str)
        self.end_time = self.ensure("endtime", "datetime")
        self.leap_second_indicator = self.ensure("li", str)
        self.mode = self.ensure("mode", str)
        self.poll = self.ensure("poll", int)
        self.precision = self.ensure("precision", float)
        self.reference_id = self.ensure("ref-id", str)
        self.reference_time = self.ensure("ref-ts", float)
        self.root_delay = self.ensure("root-delay", int)
        self.root_dispersion = self.ensure("root-dispersion", float)
        self.stratum = self.ensure("stratum", int)
        self.version = self.ensure("version", int)

        self.packets = []

        if "result" not in self.raw_data:
            self._handle_malformation("No result value found")
            return

        for response in self.raw_data["result"]:
            self.packets.append(Packet(response, **kwargs))

        self._set_medians_and_extremes()

    def _set_medians_and_extremes(self):
        """
        Sets median values for rtt and the offset of result packets.
        """

        rtts = sorted([p.rtt for p in self.packets if p.rtt is not None])
        if rtts:
            self.rtt_min = rtts[0]
            self.rtt_max = rtts[-1]
            self.rtt_median = self.calculate_median(rtts)

        offsets = sorted(
            [p.offset for p in self.packets if p.offset is not None]
        )
        if offsets:
            self.offset_min = offsets[0]
            self.offset_max = offsets[-1]
            self.offset_median = self.calculate_median(offsets)


# Fix: the trailing comma is required — without it __all__ was a plain
# string, which breaks "from ... import *".
__all__ = (
    "NtpResult",
)
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/ripe/atlas/sagan/ntp.py
0.867514
0.362743
ntp.py
pypi
from .base import Result, ParsingDict


class Response(ParsingDict):
    """A single HTTP response, one entry of a result's "result" list."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.af = self.ensure("af", int)
        self.body_size = self.ensure("bsize", int)
        self.head_size = self.ensure("hsize", int)
        self.destination_address = self.ensure("dst_addr", str)
        self.source_address = self.ensure("src_addr", str)
        self.code = self.ensure("res", int)
        self.response_time = self.ensure("rt", float)
        self.version = self.ensure("ver", str)

        # Older firmwares used different key names for the addresses.
        if not self.destination_address:
            self.destination_address = self.ensure(
                "addr", str, self.destination_address)
        if not self.source_address:
            self.source_address = self.ensure(
                "srcaddr", str, self.source_address)

        if not self.code:
            self._handle_malformation("No response code available")

        error = self.ensure("err", str)
        if error:
            self._handle_error(error)


class HttpResult(Result):
    """Parser for HTTP measurement results."""

    METHOD_GET = "GET"
    METHOD_POST = "POST"
    METHOD_PUT = "PUT"
    METHOD_DELETE = "DELETE"
    METHOD_HEAD = "HEAD"
    METHODS = {
        METHOD_GET: "GET",
        METHOD_POST: "POST",
        METHOD_PUT: "PUT",
        METHOD_DELETE: "DELETE",
        METHOD_HEAD: "HEAD"
    }

    def __init__(self, data, **kwargs):

        Result.__init__(self, data, **kwargs)

        self.uri = self.ensure("uri", str)
        self.method = None
        self.responses = []

        if "result" not in self.raw_data:
            self._handle_malformation("No result value found")
            return

        if isinstance(self.raw_data["result"], list):

            # All modern results
            for response in self.raw_data["result"]:
                self.responses.append(Response(response, **kwargs))

            if self.responses:
                method = self.raw_data["result"][0].get(
                    "method",
                    self.raw_data["result"][0].get("mode")  # Firmware == 4300
                )
                if method:
                    # Strip the address-family suffix, e.g. "GET4" -> "GET".
                    method = method.replace("4", "").replace("6", "")
                    if method in self.METHODS.keys():
                        self.method = self.METHODS[method]

        else:

            # Firmware <= 1: the result is a single space-separated string.
            response = self.raw_data["result"].split(" ")
            self.method = response[0].replace("4", "").replace("6", "")
            # Fix: propagate kwargs so on_error/on_malformation preferences
            # reach the subcomponent, as the modern branch above already does.
            self.responses.append(Response({
                "dst_addr": response[1],
                "rt": float(response[2]) * 1000,
                "res": int(response[3]),
                "hsize": int(response[4]),
                "bsize": int(response[5]),
            }, **kwargs))


# Fix: the trailing comma is required — without it __all__ was a plain
# string, which breaks "from ... import *".
__all__ = (
    "HttpResult",
)
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/ripe/atlas/sagan/http.py
0.72662
0.180883
http.py
pypi
import logging

import pytz

from calendar import timegm
from datetime import datetime

from .helpers.compatibility import string

# Try to use ujson if it's available
try:
    import ujson as json
except ImportError:
    import json


class ResultParseError(Exception):
    """Raised when a result is malformed beyond recovery."""
    pass


class ResultError(Exception):
    """Raised when a result contains an error (e.g. a timeout)."""
    pass


class Json(object):
    """
    ujson, while impressive, is not a drop-in replacement for json as it
    doesn't respect the various keyword arguments permitted in the default
    json parser.  As a workaround for this, we have our own class that defines
    its own .loads() method, so we can check for whichever we're using and
    adjust the arguments accordingly.
    """

    @staticmethod
    def loads(*args, **kwargs):
        try:
            if json.__name__ == "ujson":
                return json.loads(*args, **kwargs)
            # strict=False allows control characters inside strings, which
            # show up in some raw results.
            return json.loads(strict=False, *args, **kwargs)
        except ValueError:
            raise ResultParseError("The JSON result could not be parsed")


class ParsingDict(object):
    """
    A handy container for methods we use for validation in the various result
    classes.

    Note that Python 2.x and 3.x handle the creation of dictionary-like
    objects differently.  If we write it this way, it works for both.
    """

    # What to do when a result is in error or malformed.
    ACTION_IGNORE = 1
    ACTION_WARN = 2
    ACTION_FAIL = 3

    PROTOCOL_ICMP = "ICMP"
    PROTOCOL_UDP = "UDP"
    PROTOCOL_TCP = "TCP"
    PROTOCOL_MAP = {
        "ICMP": PROTOCOL_ICMP,
        "I": PROTOCOL_ICMP,
        "UDP": PROTOCOL_UDP,
        "U": PROTOCOL_UDP,
        "TCP": PROTOCOL_TCP,
        "T": PROTOCOL_TCP,
    }

    def __init__(self, **kwargs):

        self._on_error = kwargs.pop("on_error", self.ACTION_WARN)
        self.is_error = False
        self.error_message = None

        self._on_malformation = kwargs.pop("on_malformation", self.ACTION_WARN)
        self.is_malformed = False

    def __nonzero__(self):
        # If we don't define this, Python ends up calling keys()
        # via __len__() whenever we evaluate the object as a bool.
        return True

    # Fix: Python 3 calls __bool__, not __nonzero__, so without this alias
    # the always-truthy guard above was dead code on py3 and truthiness
    # fell through to __len__()/keys() after all.
    __bool__ = __nonzero__

    def __len__(self):
        return len(self.keys())

    def __iter__(self):
        for key in self.keys():
            yield getattr(self, key)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, item):
        setattr(self, key, item)

    def keys(self):
        return [p for p in dir(self) if self._is_property_name(p)]

    def ensure(self, key, kind, default=None):
        """
        Fetch self.raw_data[key] coerced to `kind` (a callable, or the
        string "datetime" for a UTC timestamp), or `default` on any failure.
        """
        try:
            if kind == "datetime":
                return datetime.fromtimestamp(
                    self.raw_data[key], tz=pytz.UTC)
            return kind(self.raw_data[key])
        except (TypeError, ValueError, KeyError):
            return default

    def clean_protocol(self, protocol):
        """
        A lot of measurement types make use of a protocol value, so we handle
        that here.
        """
        if protocol is not None:
            try:
                return self.PROTOCOL_MAP[protocol]
            except KeyError:
                self._handle_malformation(
                    '"{protocol}" is not a recognised protocol'.format(
                        protocol=protocol
                    )
                )

    def _handle_malformation(self, message):
        if self._on_malformation == self.ACTION_FAIL:
            raise ResultParseError(message)
        elif self._on_malformation == self.ACTION_WARN:
            logging.warning(message)
        self.is_malformed = True

    def _handle_error(self, message):
        if self._on_error == self.ACTION_FAIL:
            raise ResultError(message)
        elif self._on_error == self.ACTION_WARN:
            logging.warning(message)
        self.is_error = True
        self.error_message = message

    def _is_property_name(self, p):
        # Treat public, lowercase, non-callable attributes as "keys".
        if not p.startswith("_"):
            if p not in ("keys",):
                if not p.upper() == p:
                    if not callable(getattr(self, p)):
                        return True
        return False


class Result(ParsingDict):
    """
    The parent class for all measurement result classes.  Subclass this to
    handle parsing a new measurement type, or use .get() to let this class
    figure out the type for you.
    """

    def __init__(self, data, *args, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        if isinstance(data, string):
            self.raw_data = Json.loads(data)

        # These keys are present in every genuine Atlas result.
        for key in ("timestamp", "msm_id", "prb_id", "fw", "type"):
            if key not in self.raw_data:
                raise ResultParseError(
                    "This doesn't look like a RIPE Atlas measurement: {}".format(
                        self.raw_data
                    )
                )

        self.created = datetime.fromtimestamp(
            self.raw_data["timestamp"], tz=pytz.UTC)
        self.measurement_id = self.ensure("msm_id", int)
        self.probe_id = self.ensure("prb_id", int)
        self.firmware = self.ensure("fw", int)
        self.origin = self.ensure("from", str)
        self.seconds_since_sync = self.ensure("lts", int)
        self.group_id = self.ensure("group_id", int)
        self.bundle = self.ensure("bundle", int)

        # Handle the weird case where fw=0 and we don't know what to expect
        if self.firmware == 0:
            self._handle_malformation("Unknown firmware: {fw}".format(
                fw=self.firmware)
            )

        if self.seconds_since_sync is not None:
            # A negative lts means "never synchronised".
            if self.seconds_since_sync < 0:
                self.seconds_since_sync = None

        if "dnserr" in self.raw_data:
            self._handle_error(self.raw_data["dnserr"])

        if "err" in self.raw_data:
            self._handle_error(self.raw_data["err"])

    def __repr__(self):
        return "Measurement #{measurement}, Probe #{probe}".format(
            measurement=self.measurement_id,
            probe=self.probe_id
        )

    @property
    def created_timestamp(self):
        """The creation time as a Unix timestamp (UTC)."""
        return timegm(self.created.timetuple())

    @classmethod
    def get(cls, data, **kwargs):
        """
        Call this when you have a JSON result and just want to turn it into
        the appropriate Result subclass.  This is less performant than calling
        PingResult(json_string) directly however, as the JSON has to be parsed
        first to find the type.
        """
        raw_data = data
        if isinstance(data, string):
            raw_data = Json.loads(data)

        try:
            kind = raw_data["type"].lower()
        except KeyError:
            raise ResultParseError("No type value was found in the JSON input")

        # Imports are deferred so that only the needed parser module loads.
        if kind == "ping":
            from .ping import PingResult
            return PingResult(raw_data, **kwargs)
        elif kind == "traceroute":
            from .traceroute import TracerouteResult
            return TracerouteResult(raw_data, **kwargs)
        elif kind == "dns":
            from .dns import DnsResult
            return DnsResult(raw_data, **kwargs)
        elif kind == "sslcert":
            from .ssl import SslResult
            return SslResult(raw_data, **kwargs)
        elif kind == "http":
            from .http import HttpResult
            return HttpResult(raw_data, **kwargs)
        elif kind == "ntp":
            from .ntp import NtpResult
            return NtpResult(raw_data, **kwargs)
        elif kind == "wifi":
            from .wifi import WiFiResult
            return WiFiResult(raw_data, **kwargs)

        raise ResultParseError("Unknown type value was found in the JSON input")

    @staticmethod
    def calculate_median(given_list):
        """
        Returns the median of values in the given list.
        """
        median = None

        if not given_list:
            return median

        given_list = sorted(given_list)
        list_length = len(given_list)

        if list_length % 2:
            median = given_list[int(list_length / 2)]
        else:
            # Even count: average the two middle values.
            median = (given_list[int(list_length / 2)] +
                      given_list[int(list_length / 2) - 1]) / 2.0

        return median

    @property
    def type(self):
        """The lowercase measurement type, derived from the class name."""
        return self.__class__.__name__.replace("Result", "").lower()


__all__ = (
    "Result",
    "ResultParseError",
)
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/ripe/atlas/sagan/base.py
0.577019
0.208743
base.py
pypi
from __future__ import absolute_import

import base64

from collections import namedtuple
from datetime import datetime

from pytz import UTC

from .base import Result, ParsingDict
from .helpers import abuf
from .helpers import compatibility


class Header(ParsingDict):
    """A parsed DNS message header: flags, section counts and codes."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data

        self.aa = self.ensure("AA", bool)
        self.qr = self.ensure("QR", bool)
        self.nscount = self.ensure("NSCOUNT", int)
        self.qdcount = self.ensure("QDCOUNT", int)
        self.ancount = self.ensure("ANCOUNT", int)
        self.tc = self.ensure("TC", bool)
        self.rd = self.ensure("RD", bool)
        self.arcount = self.ensure("ARCOUNT", int)
        self.return_code = self.ensure("ReturnCode", str)
        self.opcode = self.ensure("OpCode", str)
        self.ra = self.ensure("RA", bool)
        self.z = self.ensure("Z", int)
        self.ad = self.ensure("AD", bool)
        self.cd = self.ensure("CD", bool)
        self.id = self.ensure("ID", int)

    def __str__(self):
        return "Header: " + self.return_code

    @property
    def flags(self):
        """All header flags bundled into a named tuple."""
        flags = namedtuple(
            "Flags", ("qr", "aa", "tc", "rd", "ra", "z", "ad", "cd"))
        return flags(qr=self.qr, aa=self.aa, tc=self.tc, rd=self.rd,
                     ra=self.ra, z=self.z, ad=self.ad, cd=self.cd)

    @property
    def sections(self):
        """The four section counts bundled into a named tuple."""
        sections = namedtuple(
            "Sections", ("QDCOUNT", "ANCOUNT", "NSCOUNT", "ARCOUNT"))
        return sections(QDCOUNT=self.qdcount, ANCOUNT=self.ancount,
                        NSCOUNT=self.nscount, ARCOUNT=self.arcount)

    @property
    def is_authoritative(self):
        return self.aa

    @property
    def is_query(self):
        # QR unset means this message is a query.  None means the flag was
        # not present in the parsed data at all.
        if self.qr is None:
            return None
        return not self.qr

    @property
    def nameserver_count(self):
        """
        Otherwise known as the NSCOUNT or the authority_count.
        """
        return self.nscount

    @property
    def question_count(self):
        return self.qdcount

    @property
    def answer_count(self):
        return self.ancount

    @property
    def is_truncated(self):
        return self.tc

    @property
    def recursion_desired(self):
        return self.rd

    @property
    def additional_count(self):
        return self.arcount

    @property
    def recursion_available(self):
        return self.ra

    @property
    def zero(self):
        return self.z

    @property
    def checking_disabled(self):
        return self.cd

    @property
    def authenticated_data(self):
        # Bug fix: this property must expose the AD (Authenticated Data)
        # flag, not the AA (Authoritative Answer) flag.
        return self.ad


class Option(ParsingDict):
    """A single EDNS0 option record."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.nsid = self.ensure("NSID", str)
        self.code = self.ensure("OptionCode", int)
        self.length = self.ensure("OptionLength", int)
        self.name = self.ensure("OptionName", str)


class Edns0(ParsingDict):
    """The EDNS0 pseudo-record (OPT) of a DNS response."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.extended_return_code = self.ensure("ExtendedReturnCode", int)
        self.name = self.ensure("Name", str)
        self.type = self.ensure("Type", str)
        self.udp_size = self.ensure("UDPsize", int)
        self.version = self.ensure("Version", int)
        self.z = self.ensure("Z", int)
        self.do = bool(self.ensure("DO", bool))

        self.options = []
        if "Option" in self.raw_data:
            if isinstance(self.raw_data["Option"], list):
                for option in self.raw_data["Option"]:
                    self.options.append(Option(option))


class Question(ParsingDict):
    """One entry of the question section."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.klass = self.ensure("Qclass", str)
        self.type = self.ensure("Qtype", str)
        self.name = self.ensure("Qname", str)

    def __str__(self):
        return ";{:30} {:<5} {:5}".format(self.name, self.klass, self.type)


class Answer(ParsingDict):
    """Base class for all resource-record types found in a response."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.name = self.ensure("Name", str)
        self.ttl = self.ensure("TTL", int)
        self.type = self.ensure("Type", str)
        self.klass = self.ensure("Class", str)
        self.rd_length = self.ensure("RDlength", int)

        # Where data goes when the abuf parser can't understand things
        self.rdata = self.ensure("Rdata", str)

    @property
    def resource_data_length(self):
        return self.rd_length

    def __str__(self):
        return "{:22} {:<7} {:5} {:5}".format(
            self.name,
            self.ttl,
            self.klass,
            self.type
        )


class AAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.address = self.ensure("Address", str)

    def __str__(self):
        return "{0} {1}".format(Answer.__str__(self), self.address)


class AaaaAnswer(AAnswer):
    pass


class NsAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.target = self.ensure("Target", str)

    def __str__(self):
        return "{0} {1}".format(Answer.__str__(self), self.target)


class CnameAnswer(NsAnswer):
    pass


class MxAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.preference = self.ensure("Preference", int)
        self.mail_exchanger = self.ensure("MailExchanger", str)

    def __str__(self):
        return "{0} {1} {2}".format(
            Answer.__str__(self),
            self.preference,
            self.mail_exchanger
        )


class SoaAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.mname = self.ensure("MasterServerName", str)
        self.rname = self.ensure("MaintainerName", str)
        self.serial = self.ensure("Serial", int)
        self.refresh = self.ensure("Refresh", int)
        self.retry = self.ensure("Retry", int)
        self.expire = self.ensure("Expire", int)
        self.minimum = self.ensure("NegativeTtl", int)

    def __str__(self):
        return "{0} {1} {2} {3} {4} {5} {6} {7}".format(
            Answer.__str__(self),
            self.mname,
            self.rname,
            self.serial,
            self.refresh,
            self.retry,
            self.expire,
            self.minimum
        )

    @property
    def master_server_name(self):
        return self.mname

    @property
    def maintainer_name(self):
        return self.rname

    @property
    def negative_ttl(self):
        return self.minimum

    @property
    def nxdomain(self):
        # Historical alias for the negative TTL value.
        return self.minimum


class DsAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.tag = self.ensure("Tag", int)
        self.algorithm = self.ensure("Algorithm", int)
        self.digest_type = self.ensure("DigestType", int)
        self.delegation_key = self.ensure("DelegationKey", str)

    def __str__(self):
        return "{0} {1} {2} {3} {4}".format(
            Answer.__str__(self),
            self.tag,
            self.algorithm,
            self.digest_type,
            self.delegation_key
        )


class DnskeyAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.flags = self.ensure("Flags", int)
        self.algorithm = self.ensure("Algorithm", int)
        self.protocol = self.ensure("Protocol", int)
        self.key = self.ensure("Key", str)

    def __str__(self):
        return "{0} {1} {2} {3} {4}".format(
            Answer.__str__(self),
            self.flags,
            self.algorithm,
            self.protocol,
            self.key
        )


class TxtAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        # Only string members of the "Data" list are kept.
        self.data = []
        raw = self.raw_data.get("Data")
        if isinstance(raw, list):
            self.data = [
                s for s in raw if isinstance(s, compatibility.string)
            ]

    def __str__(self):
        return "{0} {1}".format(Answer.__str__(self), self.data_string)

    @property
    def data_string(self):
        return " ".join(self.data)


class RRSigAnswer(Answer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.type_covered = self.ensure("TypeCovered", str)
        self.algorithm = self.ensure("Algorithm", int)
        self.labels = self.ensure("Labels", int)
        self.original_ttl = self.ensure("OriginalTTL", int)
        self.signature_expiration = self.ensure("SignatureExpiration", int)
        self.signature_inception = self.ensure("SignatureInception", int)
        self.key_tag = self.ensure("KeyTag", int)
        self.signer_name = self.ensure("SignerName", str)
        self.signature = self.ensure("Signature", str)

    def __str__(self):
        # Timestamps are rendered in the zone-file style YYYYMMDDHHMMSS (UTC).
        formatter = "%Y%m%d%H%M%S"
        expiration = datetime.fromtimestamp(
            self.signature_expiration, tz=UTC).strftime(formatter)
        inception = datetime.fromtimestamp(
            self.signature_inception, tz=UTC).strftime(formatter)
        return "{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}".format(
            Answer.__str__(self),
            self.type_covered,
            self.algorithm,
            self.labels,
            self.original_ttl,
            expiration,
            inception,
            self.key_tag,
            self.signer_name,
            self.signature
        )


class NotFullySupportedAnswer(Answer):
    """
    We're still working on getting the proper text representations of some
    Answer classes, so such classes will inherit from this one.
    """
    def __str__(self):
        return "{0} ---- Not fully supported ----".format(Answer.__str__(self))


class NsecAnswer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.next_domain_name = self.ensure("NextDomainName", str)
        self.types = self.ensure("Types", list)


class Nsec3Answer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.hash_algorithm = self.ensure("HashAlg", int)
        self.flags = self.ensure("Flags", int)
        self.iterations = self.ensure("Iterations", int)
        self.salt = self.ensure("Salt", str)
        self.hash = self.ensure("Hash", str)
        self.types = self.ensure("Types", list)


class Nsec3ParamAnswer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.algorithm = self.ensure("Algorithm", int)
        self.flags = self.ensure("Flags", int)
        self.iterations = self.ensure("Iterations", int)
        self.salt = self.ensure("Salt", str)


class PtrAnswer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.target = self.ensure("Target", str)


class SrvAnswer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.priority = self.ensure("Priority", int)
        self.weight = self.ensure("Weight", int)
        self.port = self.ensure("Port", int)
        self.target = self.ensure("Target", str)


class SshfpAnswer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.algorithm = self.ensure("Algorithm", int)
        self.digest_type = self.ensure("DigestType", int)
        self.fingerprint = self.ensure("Fingerprint", str)


class TlsaAnswer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.certificate_usage = self.ensure("CertUsage", int)
        self.selector = self.ensure("Selector", int)
        self.matching_type = self.ensure("MatchingType", int)
        self.certificate_associated_data = self.ensure("CertAssData", str)


class HinfoAnswer(NotFullySupportedAnswer):

    def __init__(self, data, **kwargs):
        Answer.__init__(self, data, **kwargs)
        self.cpu = self.ensure("Cpu", str)
        self.os = self.ensure("Os", str)


class Message(ParsingDict):
    """
    A complete DNS message, parsed either from a base64 abuf or backfilled
    from the plain result fields when abuf parsing is disabled.
    """

    # Maps a record "Type" string to the class used to parse it; anything
    # not listed falls back to the generic Answer.
    ANSWER_CLASSES = {
        "A": AAnswer,
        "AAAA": AaaaAnswer,
        "NS": NsAnswer,
        "CNAME": CnameAnswer,
        "MX": MxAnswer,
        "SOA": SoaAnswer,
        "DS": DsAnswer,
        "DNSKEY": DnskeyAnswer,
        "TXT": TxtAnswer,
        "RRSIG": RRSigAnswer,
        "NSEC": NsecAnswer,
        "NSEC3": Nsec3Answer,
        "NSEC3PARAM": Nsec3ParamAnswer,
        "PTR": PtrAnswer,
        "SRV": SrvAnswer,
        "SSHFP": SshfpAnswer,
        "TLSA": TlsaAnswer,
        "HINFO": HinfoAnswer
    }

    def __init__(self, message, response_data, parse_buf=True, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self._string_representation = message
        self.raw_data = {}

        if parse_buf:
            self._parse_buf(message)
        else:
            self._backfill_raw_data_from_result(response_data)

        self.header = None
        if "HEADER" in self.raw_data:
            self.header = Header(self.raw_data["HEADER"], **kwargs)

            # This is a tricky one, since you can't know that the response is
            # an error until *after* the abuf is parsed, and it won't be
            # parsed until you attempt to access it.
            # Bug fix: the NOERROR check is guarded by the "HEADER" test so
            # we don't dereference self.header when no header was parsed.
            code = self.header.return_code
            if not code or code.upper() != "NOERROR":
                self._handle_error('The response did not contain "NOERROR"')

        self.edns0 = None
        self.questions = []
        self.answers = []
        self.authorities = []
        self.additionals = []

        if "EDNS0" in self.raw_data:
            self.edns0 = Edns0(self.raw_data["EDNS0"], **kwargs)

        for question in self.raw_data.get("QuestionSection", []):
            self.questions.append(Question(question, **kwargs))
        for answer in self.raw_data.get("AnswerSection", []):
            self._append_answer(answer, "answers", **kwargs)
        for authority in self.raw_data.get("AuthoritySection", []):
            self._append_answer(authority, "authorities", **kwargs)
        for additional in self.raw_data.get("AdditionalSection", []):
            self._append_answer(additional, "additionals", **kwargs)

    def __str__(self):
        return self._string_representation

    def __repr__(self):
        return str(self)

    def _append_answer(self, answer, section, **kwargs):
        """Parse one record and append it to the named section list."""
        answer_type = answer.get("Type")
        if answer_type is None:
            self._handle_malformation(
                "Answer has no parseable Type: {answer}".format(
                    answer=answer
                )
            )
        # A record with an unknown (or missing) type is still appended, as a
        # generic Answer.
        answer_class = self.ANSWER_CLASSES.get(answer_type, Answer)
        getattr(self, section).append(answer_class(answer, **kwargs))

    def _parse_buf(self, message):
        """Decode and parse the base64 abuf into self.raw_data."""
        try:
            self.raw_data = abuf.AbufParser.parse(base64.b64decode(message))
        except Exception as e:
            self.raw_data = {}
            self._handle_malformation(
                "{exception}: Unable to parse buffer: {buffer}".format(
                    exception=e,
                    buffer=self._string_representation
                )
            )
        else:
            if "ERROR" in self.raw_data:
                self._handle_error(self.raw_data["ERROR"])

    def _backfill_raw_data_from_result(self, response_data):
        """
        Populate self.raw_data from the plain result fields when abuf
        parsing was disabled.
        """
        # Header
        # NOTE(review): this writes the key "Header", while __init__ checks
        # for "HEADER" — confirm which key the rest of the code expects.
        self.raw_data["Header"] = {}
        for key in ("NSCOUNT", "QDCOUNT", "ID", "ARCOUNT", "ANCOUNT"):
            if key in response_data:
                self.raw_data["Header"][key] = response_data[key]

        # Answers
        if "answers" in response_data and response_data["answers"]:

            # The names used in the result don't align to those used in the
            # abuf parser
            name_map = {
                "TTL": "TTL",
                "TYPE": "Type",
                "NAME": "Name",
                "RDATA": "Data",
                "MNAME": "MasterServerName",
                "RNAME": "MaintainerName",
                "SERIAL": "Serial",
                "RDLENGTH": "RDlength",
            }

            self.raw_data["AnswerSection"] = []
            for answer in response_data["answers"]:
                temporary = {}
                for k, v in name_map.items():
                    if k in answer:
                        temporary[v] = answer[k]

                # Special case where some older txt entires are strings and
                # not a list
                if temporary.get("Type") == "TXT":
                    if isinstance(temporary.get("Data"), compatibility.string):
                        temporary["Data"] = [temporary["Data"]]

                if temporary:
                    self.raw_data["AnswerSection"].append(temporary)


class Response(ParsingDict):
    """One DNS response, with lazily-parsed abuf/qbuf accessors."""

    def __init__(self, data, af=None, destination=None, source=None,
                 protocol=None, part_of_set=True, parse_buf=True, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.af = self.ensure("af", int, af)
        self.destination_address = self.ensure("dst_addr", str, destination)
        self.source_address = self.ensure("src_addr", str, source)
        self.protocol = self.ensure("proto", str, protocol)
        self.response_id = None

        # Preparing for lazy stuff
        self._abuf = None
        self._qbuf = None
        self._parse_buf = parse_buf

        # Result-set entries keep "rt"/"size" under "result"; single results
        # keep them at the top level.
        try:
            self.response_time = round(float(self.raw_data["result"]["rt"]), 3)
        except KeyError:
            try:
                self.response_time = round(self.ensure("rt", float), 3)
            except TypeError:
                self.response_time = None

        try:
            self.response_size = self.raw_data["result"]["size"]
        except KeyError:
            self.response_size = self.ensure("size", int)

        if part_of_set:
            self.response_id = self.ensure("subid", int)

        if self.protocol and isinstance(self.protocol, str):
            self.protocol = self.clean_protocol(self.protocol)

    @property
    def abuf(self):
        return self._get_buf("a")

    @property
    def qbuf(self):
        return self._get_buf("q")

    def _get_buf(self, prefix):
        """
        Lazy read-only accessor for the (a|q)buf.
        The parsed Message object is cached for subsequent requests.
        """
        kind = "{prefix}buf".format(prefix=prefix)
        private_name = "_" + kind

        buf = getattr(self, private_name)
        if buf:
            return buf

        try:
            buf_string = self.raw_data["result"][kind]
        except KeyError:
            buf_string = self.ensure(kind, str)

        if buf_string:
            message = Message(
                buf_string,
                self.raw_data,
                parse_buf=self._parse_buf,
                on_error=self._on_error,
                on_malformation=self._on_malformation
            )
            if message.is_error:
                self._handle_error(message.error_message)
            setattr(self, private_name, message)

        return getattr(self, private_name)


class DnsResult(Result):

    def __init__(self, data, parse_buf=True, **kwargs):
        """
        Note that we're not setting `self.af` here, but rather we have it as
        a property of `Response` as it's possible that one result can contain
        multiple responses, each with either af=4 or af=6.
        """
        Result.__init__(self, data, **kwargs)

        self.responses = []
        self.responses_total = None

        af = self.ensure("af", int)
        protocol = self.ensure("proto", str)
        source_address = self.ensure("src_addr", str)
        destination_address = self.ensure("dst_addr", str)

        # Pre-4460 firmwares used "pf" instead of "af".
        if 0 < self.firmware < 4460:
            af = self.ensure("pf", int)

        part_of_set, responses = self.build_responses()

        for response in responses:
            self.responses.append(Response(
                response,
                af=af,
                destination=destination_address,
                source=source_address,
                protocol=protocol,
                part_of_set=part_of_set,
                parse_buf=parse_buf,
                **kwargs
            ))

        if "error" in self.raw_data:
            if isinstance(self.raw_data["error"], dict):
                if "timeout" in self.raw_data["error"]:
                    self._handle_error("Timeout: {timeout}".format(
                        timeout=self.raw_data["error"]["timeout"]
                    ))
                elif "getaddrinfo" in self.raw_data["error"]:
                    self._handle_error("Name resolution error: {msg}".format(
                        msg=self.raw_data["error"]["getaddrinfo"]
                    ))
                else:
                    self._handle_error("Unknown error: {msg}".format(
                        msg=self.raw_data["error"]
                    ))
            else:
                self._handle_error("Unknown error: {msg}".format(
                    msg=self.raw_data["error"]
                ))

    def build_responses(self):
        """
        DNS measurement results are a little wacky.  Sometimes you get a
        single response, other times you get a set of responses (result set).
        In order to establish a unified interface, we conform all results to
        the same format: a list of response objects.

        Additionally, the qbuf property is weird too.  In the case of
        multiple responses, there's one qbuf for every response, but for
        single results, it's not stored in the result, but rather the outer
        result data.  Again, for the purposes of uniformity, we shoehorn the
        qbuf into the first (and only) response in the latter case.
        """
        responses = []
        part_of_set = True

        # Account for single results
        if "result" in self.raw_data:

            # Shoehorn the outer qbuf into the (single) result.
            if "qbuf" in self.raw_data:
                if "qbuf" not in self.raw_data["result"]:
                    self.raw_data["result"]["qbuf"] = self.raw_data.pop("qbuf")

            responses.append(self.raw_data["result"])
            part_of_set = False

        try:
            self.responses_total = int(self.raw_data["result"]["submax"])
        except (KeyError, ValueError):
            pass  # The value wasn't there, not much we can do about it

        try:
            responses += self.raw_data["resultset"]
        except KeyError:
            pass  # self.responses remains the same

        return part_of_set, responses


__all__ = (
    "DnsResult",
)
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/ripe/atlas/sagan/dns.py
0.820972
0.198142
dns.py
pypi
import logging
import pytz
import codecs

from datetime import datetime

try:
    from cryptography import x509
    from cryptography.x509.oid import NameOID
    from cryptography.hazmat.backends import openssl
    from cryptography.hazmat.primitives import hashes
except ImportError:
    logging.warning(
        "cryptography module is not installed, without it you cannot parse SSL "
        "certificate measurement results"
    )

from .base import Result, ParsingDict
from .helpers.compatibility import string

EXT_SAN = "subjectAltName"


class Certificate(ParsingDict):
    """A single X.509 certificate parsed from its PEM representation."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data

        self.subject_cn = None
        self.subject_o = None
        self.subject_c = None
        self.issuer_cn = None
        self.issuer_o = None
        self.issuer_c = None
        self.valid_from = None
        self.valid_until = None
        self.checksum_md5 = None
        self.checksum_sha1 = None
        self.checksum_sha256 = None
        self.has_expired = None
        self.extensions = {}

        cert = x509.load_pem_x509_certificate(
            data.encode("ascii"), openssl.backend)

        if cert:
            self.checksum_md5 = self._colonify(cert.fingerprint(hashes.MD5()))
            self.checksum_sha1 = self._colonify(
                cert.fingerprint(hashes.SHA1()))
            self.checksum_sha256 = self._colonify(
                cert.fingerprint(hashes.SHA256()))
            # cryptography returns naive datetimes; localize them to UTC.
            self.valid_from = pytz.utc.localize(cert.not_valid_before)
            self.valid_until = pytz.utc.localize(cert.not_valid_after)
            self.has_expired = self._has_expired()
            self._add_extensions(cert)

        if cert and cert.subject:
            self.subject_cn, self.subject_o, self.subject_c = \
                self._parse_x509_name(cert.subject)
        if cert and cert.issuer:
            self.issuer_cn, self.issuer_o, self.issuer_c = \
                self._parse_x509_name(cert.issuer)

    # OID name lookup of the common abbreviations
    # In reality probably only CN will be used
    _oid_names = {
        NameOID.COMMON_NAME: "CN",
        NameOID.ORGANIZATION_NAME: "O",
        NameOID.ORGANIZATIONAL_UNIT_NAME: "OU",
        NameOID.COUNTRY_NAME: "C",
        NameOID.STATE_OR_PROVINCE_NAME: "S",
        NameOID.LOCALITY_NAME: "L",
    }

    def _get_oid_name(self, oid):
        # Fall back to the dotted numeric form for unknown OIDs.
        return self._oid_names.get(oid, oid.dotted_string)

    def _name_attribute_to_string(self, name):
        """
        Build a /-separated string from an x509.Name.
        """
        return "".join(
            "/{}={}".format(
                self._get_oid_name(attr.oid),
                attr.value,
            )
            for attr in name
        )

    def _get_subject_alternative_names(self, ext):
        """
        Return a list of Subject Alternative Name values for the given x509
        extension object.
        """
        values = []
        for san in ext.value:
            if isinstance(san.value, string):
                # Pass on simple string SAN values
                values.append(san.value)
            elif isinstance(san.value, x509.Name):
                # In theory there there could be >1 RDN here...
                values.extend(
                    self._name_attribute_to_string(rdn)
                    for rdn in san.value.rdns
                )
        return values

    def _add_extensions(self, cert):
        # Only the SAN extension is currently extracted.
        for ext in cert.extensions:
            if ext.oid._name == EXT_SAN:
                self.extensions[EXT_SAN] = \
                    self._get_subject_alternative_names(ext)

    @staticmethod
    def _colonify(raw_bytes):
        """Render a fingerprint as colon-separated uppercase hex pairs."""
        # Renamed locals: the originals shadowed the builtins bytes/hex.
        hex_string = codecs.getencoder(
            "hex_codec")(raw_bytes)[0].decode("ascii").upper()
        return ":".join(a + b for a, b in zip(hex_string[::2],
                                              hex_string[1::2]))

    @staticmethod
    def _parse_x509_name(name):
        """Extract (CN, O, C) attribute values from an x509.Name."""
        cn = None
        o = None
        c = None
        for attr in name:
            if attr.oid == NameOID.COUNTRY_NAME:
                c = attr.value
            elif attr.oid == NameOID.ORGANIZATION_NAME:
                o = attr.value
            elif attr.oid == NameOID.COMMON_NAME:
                cn = attr.value
        return cn, o, c

    def _has_expired(self):
        """
        Return True when "now" falls outside the certificate's validity
        window.

        Bug fix: the previous expression returned True while the certificate
        was *inside* its validity window, i.e. the inverse of what the
        method name (and self.has_expired) promises.
        """
        now = pytz.utc.localize(datetime.utcnow())
        return not (self.valid_from <= now <= self.valid_until)

    @property
    def cn(self):
        return self.subject_cn

    @property
    def o(self):
        return self.subject_o

    @property
    def c(self):
        return self.subject_c

    @property
    def common_name(self):
        return self.cn

    @property
    def organisation(self):
        return self.o

    @property
    def country(self):
        return self.c

    @property
    def checksum(self):
        return self.checksum_sha256


class Alert(ParsingDict):
    """A TLS alert record sent by the server."""

    # Taken from https://tools.ietf.org/html/rfc5246#section-7.2
    DESCRIPTION_MAP = {
        0: "close_notify",
        10: "unexpected_message",
        20: "bad_record_mac",
        21: "decryption_failed_RESERVED",
        22: "record_overflow",
        30: "decompression_failure",
        40: "handshake_failure",
        41: "no_certificate_RESERVED",
        42: "bad_certificate",
        43: "unsupported_certificate",
        44: "certificate_revoked",
        45: "certificate_expired",
        46: "certificate_unknown",
        47: "illegal_parameter",
        48: "unknown_ca",
        49: "access_denied",
        50: "decode_error",
        51: "decrypt_error",
        60: "export_restriction_RESERVED",
        70: "protocol_version",
        71: "insufficient_security",
        80: "internal_error",
        90: "user_canceled",
        100: "no_renegotiation",
        110: "unsupported_extension",
    }

    def __init__(self, data, **kwargs):
        ParsingDict.__init__(self, **kwargs)
        self.raw_data = data
        self.level = self.ensure("level", int)
        # Some firmwares emitted the misspelled key "decription"; try that
        # first, then fall back to the correct spelling.
        self.description = self.ensure("decription", int)
        if self.description is None:
            self.description = self.ensure("description", int)

    @property
    def description_string(self):
        return self.DESCRIPTION_MAP.get(self.description, "Unknown")


class SslResult(Result):

    def __init__(self, data, **kwargs):

        Result.__init__(self, data, **kwargs)

        self.af = self.ensure("af", int)
        self.destination_address = self.ensure("dst_addr", str)
        self.destination_name = self.ensure("dst_name", str)
        self.source_address = self.ensure("src_addr", str)
        self.port = self.ensure("dst_port", int)
        self.method = self.ensure("method", str)
        self.version = self.ensure("ver", str)
        self.response_time = self.ensure("rt", float)
        self.time_to_connect = self.ensure("ttc", float)

        if "error" in self.raw_data:
            self._handle_error(self.raw_data["error"])

        # Older versions used named ports
        if self.port is None and self.raw_data.get("dst_port") == "https":
            self.port = 443

        self.alert = None
        self.certificates = []
        self.is_self_signed = False

        if "alert" in self.raw_data:
            self.alert = Alert(self.raw_data["alert"], **kwargs)
            self._handle_error(self.alert.description_string)

        if "cert" in self.raw_data and isinstance(self.raw_data["cert"], list):
            for certificate in self.raw_data["cert"]:
                try:
                    self.certificates.append(Certificate(certificate,
                                                         **kwargs))
                except Exception as exc:
                    self._handle_error(str(exc))
                    continue

        # A single certificate whose subject equals its issuer is taken to
        # be self-signed.
        if len(self.certificates) == 1:
            certificate = self.certificates[0]
            if certificate.subject_cn == certificate.issuer_cn:
                self.is_self_signed = True

    @property
    def checksum_chain(self):
        """
        Returns a list of checksums joined with "::".
        """
        checksums = []
        for certificate in self.certificates:
            checksums.append(certificate.checksum)
        return "::".join(checksums)


# Bug fix: ("SslResult") is a plain string; the trailing comma makes it the
# intended one-element tuple.
__all__ = (
    "SslResult",
)
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/ripe/atlas/sagan/ssl.py
0.646906
0.184657
ssl.py
pypi
import logging

from calendar import timegm

from .base import Result, ParsingDict


class IcmpHeader(ParsingDict):
    """
    But why did we stop here?  Why not go all the way and define subclasses
    for each object and for `mpls`?  It comes down to a question of
    complexity vs. usefulness.  This is such a fringe case that it's probably
    fine to just dump the data in to `self.objects` and let people work from
    there.  If however you feel that this needs expansion, pull requests are
    welcome :-)

    Further information regarding the structure and meaning of the data in
    this class can be found here:

        https://atlas.ripe.net/docs/data_struct/
    """

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.version = self.ensure("version", int)
        self.rfc4884 = self.ensure("rfc4884", bool)
        self.objects = self.ensure("obj", list)


class Packet(ParsingDict):
    """One probe packet sent for a hop."""

    ERROR_CONDITIONS = {
        "N": "Network unreachable",
        "H": "Destination unreachable",
        "A": "Administratively prohibited",
        "P": "Protocol unreachable",
        "p": "Port unreachable",
    }

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.origin = self.ensure("from", str)
        self.rtt = self.ensure("rtt", float)
        self.size = self.ensure("size", int)
        self.ttl = self.ensure("ttl", int)
        self.mtu = self.ensure("mtu", int)
        self.destination_option_size = self.ensure("dstoptsize", int)
        self.hop_by_hop_option_size = self.ensure("hbhoptsize", int)
        self.arrived_late_by = self.ensure("late", int, 0)
        self.internal_ttl = self.ensure("ittl", int, 1)

        if self.rtt:
            self.rtt = round(self.rtt, 3)

        # Single-letter error codes are expanded to readable messages;
        # unknown codes are passed through unchanged.
        error = self.ensure("err", str)
        if error:
            self._handle_error(self.ERROR_CONDITIONS.get(error, error))

        icmp_header = self.ensure("icmpext", dict)
        self.icmp_header = None
        if icmp_header:
            self.icmp_header = IcmpHeader(icmp_header, **kwargs)

    def __str__(self):
        return self.origin


class Hop(ParsingDict):
    """One hop in the traceroute: a set of probe packets and their stats."""

    def __init__(self, data, **kwargs):

        ParsingDict.__init__(self, **kwargs)

        self.raw_data = data
        self.index = self.ensure("hop", int)

        error = self.ensure("error", str)
        if error:
            self._handle_error(error)

        self.packets = []
        packet_rtts = []
        if "result" in self.raw_data:
            for raw_packet in self.raw_data["result"]:
                # Late packets are not counted as part of this hop.
                if "late" not in raw_packet:
                    packet = Packet(raw_packet, **kwargs)
                    if packet.rtt:
                        packet_rtts.append(packet.rtt)
                    self.packets.append(packet)

        self.median_rtt = Result.calculate_median(packet_rtts)

    def __str__(self):
        return str(self.index)


class TracerouteResult(Result):

    def __init__(self, data, **kwargs):

        Result.__init__(self, data, **kwargs)

        self.af = self.ensure("af", int)
        self.destination_address = self.ensure("dst_addr", str)
        self.destination_name = self.ensure("dst_name", str)
        self.source_address = self.ensure("src_addr", str)
        self.end_time = self.ensure("endtime", "datetime")
        self.paris_id = self.ensure("paris_id", int)
        self.size = self.ensure("size", int)

        # Pre-4460 firmwares used "pf" instead of "af".
        if 0 < self.firmware < 4460:
            self.af = self.ensure("pf", int)

        self.protocol = self.clean_protocol(self.ensure("proto", str))

        self.hops = []
        self.total_hops = 0
        self.last_median_rtt = None

        # Used by a few response tests below
        self.destination_ip_responded = False
        self.last_hop_responded = False
        self.is_success = False
        self.last_hop_errors = []

        self._parse_hops(**kwargs)  # Sets hops, last_median_rtt, total_hops

    @property
    def last_rtt(self):
        logging.warning(
            '"last_rtt" is deprecated and will be removed in future versions. '
            'Instead, use "last_median_rtt".'
        )
        return self.last_median_rtt

    @property
    def target_responded(self):
        logging.warning(
            'The "target_responded" property is deprecated and will be '
            'removed in future versions. Instead, use '
            '"destination_ip_responded".'
        )
        return self.destination_ip_responded

    def set_destination_ip_responded(self, last_hop):
        """Sets the flag if destination IP responded."""
        if not self.destination_address:
            return
        for packet in last_hop.packets:
            if packet.origin and \
                    self.destination_address == packet.origin:
                self.destination_ip_responded = True
                break

    def set_last_hop_responded(self, last_hop):
        """Sets the flag if last hop responded."""
        for packet in last_hop.packets:
            if packet.rtt:
                self.last_hop_responded = True
                break

    def set_is_success(self, last_hop):
        """Sets the flag if traceroute result is successful or not."""
        for packet in last_hop.packets:
            if packet.rtt and not packet.is_error:
                self.is_success = True
                break
        else:
            # No packet succeeded; collect the errors instead.
            self.set_last_hop_errors(last_hop)

    def set_last_hop_errors(self, last_hop):
        """Sets the last hop's errors."""
        if last_hop.is_error:
            self.last_hop_errors.append(last_hop.error_message)
            return
        for packet in last_hop.packets:
            if packet.is_error:
                self.last_hop_errors.append(packet.error_message)

    @property
    def end_time_timestamp(self):
        return timegm(self.end_time.timetuple())

    @property
    def ip_path(self):
        """
        Returns just the IPs from the traceroute.
        """
        r = []
        for hop in self.hops:
            r.append([packet.origin for packet in hop.packets])
        return r

    def _parse_hops(self, parse_all_hops=True, **kwargs):
        """Build Hop objects (and summary flags) from the raw result list."""
        # Bug fix: the previous version validated with assert, which is
        # stripped under `python -O`, letting malformed data through to
        # crash later.  Validate explicitly instead.
        hops = self.raw_data.get("result")
        if not isinstance(hops, list):
            self._handle_malformation("Legacy formats not supported")
            return

        num_hops = len(hops)

        # Go through the hops in reverse so that if parse_all_hops is False
        # we can stop processing as soon as possible.
        for index, raw_hop in reversed(list(enumerate(hops))):

            hop = Hop(raw_hop, **kwargs)

            # If last hop set several useful attributes
            if index + 1 == num_hops:
                self.set_destination_ip_responded(hop)
                self.set_last_hop_responded(hop)
                self.set_is_success(hop)
                # We always store the last hop
                self.hops.insert(0, hop)
            elif parse_all_hops:
                self.hops.insert(0, hop)

            if hop.median_rtt and not self.last_median_rtt:
                self.last_median_rtt = hop.median_rtt
                if not parse_all_hops:
                    # Now that we have the last RTT we can stop
                    break

        self.total_hops = num_hops


__all__ = (
    "TracerouteResult",
)
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/ripe/atlas/sagan/traceroute.py
0.73412
0.341747
traceroute.py
pypi
from .base import Result, ResultParseError, ParsingDict class Packet(ParsingDict): def __init__(self, data, default_ttl, default_source_address, **kwargs): ParsingDict.__init__(self, **kwargs) self.rtt = None self.dup = False self.ttl = None self.source_address = data.get( "src_addr", data.get( "srcaddr", default_source_address ) ) if "rtt" in data: try: self.rtt = round(float(data["rtt"]), 3) except (ValueError, TypeError): raise ResultParseError( 'RTT "{rtt}" does not appear to be a float'.format( rtt=data["rtt"] ) ) if self.rtt: self.ttl = default_ttl if "ttl" in data: try: self.ttl = int(data["ttl"]) except (ValueError, TypeError): raise ResultParseError( 'TTL "{ttl}" does not appear to be an integer'.format( ttl=data["ttl"] ) ) if "dup" in data: self.dup = True def __str__(self): return str(self.rtt) class PingResult(Result): """ Ping measurement result class """ def __init__(self, data, **kwargs): Result.__init__(self, data, **kwargs) self.af = self.ensure("af", int) self.duplicates = self.ensure("dup", int) self.rtt_average = self.ensure("avg", float) self.rtt_max = self.ensure("max", float) self.rtt_min = self.ensure("min", float) self.packets_sent = self.ensure("sent", int) self.packets_received = self.ensure("rcvd", int) self.packet_size = self.ensure("size", int) self.destination_name = self.ensure("dst_name", str) self.destination_address = self.ensure("dst_addr", str) self.step = self.ensure("step", int) self.rtt_median = None # Redefined in self._set_rtt_median() self.packets = [] if self.rtt_average < 0: self.rtt_average = self.rtt_min = self.rtt_max = None if 0 < self.firmware < 4460: self.af = self.ensure("pf", int) self.protocol = self.clean_protocol(self.ensure("proto", str)) if 0 < self.firmware < 4460: self.destination_address = self.ensure("addr", str) self.destination_name = self.ensure("name", str) self.packet_size = None elif 0 < self.firmware < 4570 and self.protocol == self.PROTOCOL_ICMP: self.packet_size -= 8 if self.af is None and 
self.destination_address: self.af = 4 if ":" in self.destination_address: self.af = 6 if self.rtt_average: self.rtt_average = round(self.rtt_average, 3) self._parse_packets(**kwargs) self._set_rtt_median() def _parse_packets(self, **kwargs): source_address = self.raw_data.get( "src_addr", self.raw_data.get("srcaddr") ) for packet in self.ensure("result", list, []): self.packets.append( Packet( packet, self.ensure("ttl", int), source_address, **kwargs ) ) def _set_rtt_median(self): packets = sorted([ p.rtt for p in self.packets if p.rtt is not None and p.dup is False ]) self.rtt_median = self.calculate_median(packets) __all__ = ( "PingResult", )
/ripe.atlas.sagan-1.3.0.tar.gz/ripe.atlas.sagan-1.3.0/ripe/atlas/sagan/ping.py
0.651244
0.221098
ping.py
pypi
from ripe.atlas.sagan import Result from ripe.atlas.sagan import ResultParseError from ripe.atlas.cousteau import ProbeRequest from ripe.atlas.cousteau import Probe as CProbe from .exceptions import RipeAtlasToolsException from .cache import cache from .settings import conf class FilterFactory(object): @staticmethod def create(key, value): """Create new filter class based on the key""" if key == "asn": return ASNFilter(value) else: return Filter(key, value) class Filter(object): """ Class that represents filter for results. For now supports only attributes of probes property of Result property. It could be extended for any property of Result easily. """ def __init__(self, key, value): self.key = key self.value = value def filter(self, result): """ Decide if given result should be filtered (False) or remain on the pile of results. """ try: attr_value = getattr(result.probe, self.key) except AttributeError: log = ( "Cousteau's Probe class does not have an attribute " "called: <{}>" ).format(self.key) raise RipeAtlasToolsException(log) if attr_value == self.value: return True return False class ASNFilter(Filter): """Class thar represents filter by probes that belong to given ASN.""" def __init__(self, value): key = "asn" super(ASNFilter, self).__init__(key, value) def filter(self, result): asn_v4 = getattr(result.probe, "asn_v4") asn_v6 = getattr(result.probe, "asn_v6") if self.value in (asn_v4, asn_v6): return True return False def filter_results(filters, results): """docstring for filter""" new_results = [] for result in results: for rfilter in filters: if rfilter.filter(result): new_results.append(result) break return new_results class SaganSet(object): """ An iterable of sagan results with attached probe information that allows for filtering by the filters module. 
""" def __init__(self, iterable=None, probes=()): self._probes = probes self._iterable = iterable def __iter__(self): sagans = [] for line in self._iterable: # line may be a dictionary (parsed JSON) if hasattr(line, "strip"): line = line.strip() # Break out when there's nothing left if not line: break try: sagan = Result.get( line, on_error=Result.ACTION_IGNORE, on_warning=Result.ACTION_IGNORE, ) if not self._probes or sagan.probe_id in self._probes: sagans.append(sagan) if len(sagans) > 100: for sagan in self._attach_probes(sagans): yield sagan sagans = [] except ResultParseError: pass # Probably garbage in the file for sagan in self._attach_probes(sagans): yield sagan def __next__(self): return iter(self).next() def next(self): return self.__next__() def _attach_probes(self, sagans): probes = dict( [ (p.id, p) for p in Probe.get_many( (s.probe_id for s in sagans) ) ] ) for sagan in sagans: sagan.probe = probes[sagan.probe_id] yield sagan class Probe(object): """ A crude representation of the data we get from the API via Cousteau """ EXPIRE_TIME = 60 * 60 * 24 * 30 @classmethod def get(cls, pk): """ Given a single id, attempt to fetch a probe object from the cache. If that fails, do an API call to get it. Don't use this for multiple probes unless you know they're all in the cache, or you'll be in for a long wait. """ r = cache.get("probe:{}".format(pk)) if not r: kwargs = {"id": pk, "server": conf["api-server"]} probe = CProbe(**kwargs) cache.set("probe:{}".format(probe.id), probe, cls.EXPIRE_TIME) return probe @classmethod def get_many(cls, ids): """ Given a list of ids, attempt to get probe objects out of the local cache. Probes that cannot be found will be fetched from the API and cached for future use. 
""" r = [] fetch_ids = [] for pk in ids: probe = cache.get("probe:{}".format(pk)) if probe: r.append(probe) else: fetch_ids.append(str(pk)) if fetch_ids: kwargs = {"id__in": fetch_ids, "server": conf["api-server"]} for probe in [p for p in ProbeRequest(return_objects=True, **kwargs)]: cache.set("probe:{}".format(probe.id), probe, cls.EXPIRE_TIME) r.append(probe) return r
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/filters.py
0.719679
0.289359
filters.py
pypi
import argparse import os import re import sys from dateutil import parser from ..settings import aliases class ArgumentType(object): @staticmethod def path(string): if not os.path.exists(string) and not string == "-": raise argparse.ArgumentTypeError( 'The file name specified, "{}" does not appear to exist'.format(string) ) return string @staticmethod def country_code(string): if not re.match(r"^[a-zA-Z][a-zA-Z]$", string): raise argparse.ArgumentTypeError( "Countries must be defined with a two-letter ISO code" ) return string.upper() @staticmethod def datetime(string): try: return parser.parse(string) except parser.ParserError: raise argparse.ArgumentTypeError( "Times must be specified in ISO 8601 format. For example: " "2010-10-01T00:00:00 or a portion thereof. All times are in " "UTC." ) @staticmethod def ip_or_domain(string): message = '"{}" does not appear to be an IP address or host ' "name".format( string ) if " " in string: raise argparse.ArgumentTypeError(message) if "." not in string and ":" not in string: if not re.match(r"^\w+$", string): raise argparse.ArgumentTypeError(message) return string @classmethod def comma_separated_integers_or_file(cls, string): """ Allow a list of comma-separated integers, or a file containing a newline-separated list of integers, OR "-" which implies standard out. """ if re.match(r"^((\d+,?)+)$", string): return cls.comma_separated_integers()(string) f = sys.stdin if not string == "-": if not os.path.exists(string): raise argparse.ArgumentTypeError("Cannot find file: {}".format(string)) f = open(string) try: return [int(_) for _ in f.readlines()] except ValueError: raise argparse.ArgumentTypeError( "The contents of the file presented does not conform to input " "standards. Please ensure that every line in the file " "consists of a single integer." 
) @staticmethod def tag(string): pattern = re.compile(r"^[a-z_\-0-9]+$") if not pattern.match(string): raise argparse.ArgumentTypeError( '"{}" does not appear to be a valid tag.'.format(string) ) return string class integer_range(object): def __init__(self, minimum=float("-inf"), maximum=float("inf")): self.minimum = minimum self.maximum = maximum def __call__(self, string): message = "The integer must be between {} and {}.".format( self.minimum, self.maximum ) if self.maximum == float("inf"): message = "The integer must be greater than {}.".format(self.minimum) try: integer = int(string) if integer < self.minimum or integer > self.maximum: raise argparse.ArgumentTypeError(message) except ValueError: raise argparse.ArgumentTypeError("An integer must be specified.") return integer class comma_separated_integers(object): def __init__(self, minimum=float("-inf"), maximum=float("inf")): self.minimum = minimum self.maximum = maximum def __call__(self, string): r = [] for i in string.split(","): try: i = int(i) except ValueError: raise argparse.ArgumentTypeError( "The ids supplied were not in the correct format. Note " "that you must specify them as a list of " "comma-separated integers without spaces. 
Example: " "1,2,34,157,10006" ) if i < self.minimum: raise argparse.ArgumentTypeError( "{} is lower than the minimum permitted value of " "{}.".format(i, self.minimum) ) if i > self.maximum: raise argparse.ArgumentTypeError( "{} exceeds the maximum permitted value of {}.".format( i, self.maximum ) ) r.append(i) return r class regex(object): def __init__(self, regex): self.regex = re.compile(regex) def __call__(self, string): if not self.regex.match(string): raise argparse.ArgumentTypeError( '"{}" does not appear to be valid.'.format(string) ) return string @staticmethod def alias_is_valid(string): ret = None if string and not string.isdigit(): pattern = re.compile(r"^[a-zA-Z\._\-0-9]+$") if pattern.match(string): ret = string if not ret: raise argparse.ArgumentTypeError( '"{}" does not appear to be a valid ' "alias.".format(string) ) return ret class id_or_alias(object): TYPE = None def __call__(self, string): if string.isdigit(): return int(string) if string in aliases[self.TYPE]: return int(aliases[self.TYPE][string]) else: raise argparse.ArgumentTypeError( '"{}" does not appear to be an existent ' "{} alias.".format(string, self.TYPE) ) class msm_id_or_name(id_or_alias): TYPE = "measurement" class probe_id_or_name(id_or_alias): TYPE = "probe"
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/helpers/validators.py
0.561575
0.210219
validators.py
pypi
import argparse import csv import re from io import StringIO from typing import (Any, Dict, Iterable, Iterator, List, Mapping, Optional, Tuple) from typing_extensions import Literal, NotRequired, TypedDict from .colours import colourise Alignment = Literal["<", "^", ">"] LEFT: Alignment = "<" CENTRE: Alignment = "^" RIGHT: Alignment = ">" class ColumnDef(TypedDict): align: Alignment width: int class RowDef(TypedDict): obj: NotRequired[Any] values: Dict[str, Any] colour: NotRequired[str] # ((1, "Value1"), (3, True), (4, 5342),) AggregationKey = Tuple[Any, ...] class SortableNull: def __str__(self): return "null" def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, type(self)) def __lt__(self, other): if isinstance(other, type(self)): return False return True def __gt__(self, other): return False class Renderer: """ Abstract base class for tabular renderers. """ def __init__( self, rows: List[RowDef], total_count: int, columns: Dict[str, ColumnDef], filters: Mapping[str, str], arguments: argparse.Namespace, ): self.rows = rows self.total_count = total_count self.columns = columns self.filters = filters self.aggregate_by: List[str] = arguments.aggregate_by self.max_per_aggregation: Optional[int] = arguments.max_per_aggregation self.no_header: bool = arguments.no_header def __iter__(self) -> Iterator[str]: if not self.no_header: for line in self.get_header(): yield line if self.aggregate_by: for composite, rows in self._aggregate(): aggr_header = self.get_aggregation_header(composite) if aggr_header is not None: yield aggr_header for row in rows: yield self.get_line(row) else: for row in self.rows: yield self.get_line(row) if not self.no_header: for line in self.get_footer(): yield line def get_header(self) -> Iterable[str]: """ Return an iterable of lines to output before the table. """ return [] def get_aggregation_header(self, composite: AggregationKey) -> Optional[str]: """ Return a line to output before each aggregation bucket. 
""" return None def get_line(self, row: RowDef) -> str: """ Return a single line rendering the given row. """ raise NotImplementedError def get_footer(self) -> Iterable[str]: """ Return an iterable of lines to output after the table. """ return [] def _aggregate(self) -> Iterable[Tuple[AggregationKey, List[RowDef]]]: buckets: Dict[AggregationKey, List[RowDef]] = {} for row in self.rows: values = [row["values"][k] for k in self.aggregate_by] composite = tuple(SortableNull() if v is None else v for v in values) bucket = buckets.setdefault(composite, []) if ( self.max_per_aggregation is None or len(bucket) < self.max_per_aggregation ): bucket.append(row) return sorted(buckets.items()) class PrettyRenderer(Renderer): """ Renderer which outputs colourised, human-readable tables """ _fmt = None @property def fmt(self) -> str: if not self._fmt: self._fmt = " ".join( f"{{!s:{c['align']}{c['width']}}}" for c in self.columns.values() ) return self._fmt def get_header(self) -> Iterable[str]: if self.filters: yield "" yield colourise("Filters:", "white") for k, v in self.filters.items(): if k not in ("search",): v = str(v) yield colourise(f" {k}: {v}", "cyan") yield "" yield colourise(self.fmt.format(*self.columns.keys()), "white") yield colourise(self._get_horizontal_rule(), "white") def get_aggregation_header(self, composite: AggregationKey) -> str: header = dict(zip(self.aggregate_by, composite)) values = [] for name in self.columns: if name in self.aggregate_by: values.append(header[name]) else: values.append(" ") line = self.fmt.format(*values) result = re.sub( r" ( *)( [^ ]|$)", lambda s: " " + len(s.group(1)) * "-" + s.group(2), line, ) for name in self.aggregate_by: if name not in self.columns: result += f" ({name}:{header[name]})" return result def get_line(self, row: RowDef) -> str: values = [ "-" if val is None else str(val)[: col["width"]] for (col, val) in zip(self.columns.values(), row["values"].values()) ] line = self.fmt.format(*values) if row.get("colour"): 
line = colourise(line, row["colour"]) return line def get_footer(self) -> Iterable[str]: hr = self._get_horizontal_rule() yield colourise(hr, "white") yield colourise( ("{:>" + str(len(hr)) + "}").format( "Showing {} of {}".format( min(len(self.rows), self.total_count) or "all", self.total_count, ) ), "white", ) def _get_horizontal_rule(self, char="=") -> str: """ A bit of a hack: We get a formatted line for no other reason than to determine the width of that line. Then we use a regex to overwrite that line with "=". """ return re.sub(r".", char, self.fmt.format(*["" for c in self.columns])) class CSVRenderer(Renderer): """ Renderer which outputs comma-separated values, where strings are always enclosed in double quotes, and literal double quotes are written as "". """ dialect = "excel" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.data = StringIO() self.writer = csv.writer(self.data, dialect=self.dialect) def get_header(self) -> Iterable[str]: yield self.get_line({"values": dict((k, k) for k in self.columns.keys())}) def get_line(self, row: RowDef) -> str: self.data.seek(self.data.truncate(0)) self.writer.writerow( [ "" if v is None else str(v) for (k, v) in row["values"].items() if k in self.columns ] ) return self.data.getvalue()[:-1] class TabRenderer(Renderer): """ Renderer which outputs tab-separated values, where literal tabs are replaced with spaces. 
""" def get_header(self) -> Iterable[str]: yield self.get_line({"values": dict((k, k) for k in self.columns.keys())}) def get_line(self, row: RowDef) -> str: return "\t".join( "" if v is None else str(v).replace("\t", " ") for (k, v) in row["values"].items() if k in self.columns ) class IDAction(argparse.Action): def __call__(self, parser, namespace, *args, **kwargs): namespace.format = "tab" namespace.field = ["id"] namespace.no_header = True def add_argument_group(parser: argparse.ArgumentParser, fields: Iterable[str]): """ Add an argument group to the given parser with all of the parameters for controlling the tabular renderers. """ fields = list(fields) group = parser.add_argument_group("Output") group.add_argument( "--field", type=str, action="append", choices=fields, default=[], help="The field(s) to display. Invoke multiple times for multiple fields.", ) group.add_argument( "--format", default="pretty", choices=renderers.keys(), help="Output format. Default: pretty", ) group.add_argument( "--no-header", action="store_true", default=False, help="Omit header line(s)", ) group.add_argument( "--ids-only", nargs=0, action=IDAction, help="Print only IDs, equivalent to --format=tab --field=id --no-header. " "Useful for piping to another command.", ) group.add_argument( "--aggregate-by", action="append", default=[], choices=fields, help="Aggregate results based on all specified aggregations." " Use this option multiple times for more specific aggregations.", ) group.add_argument( "--max-per-aggregation", type=int, help="Maximum number of rows per aggregated bucket.", ) renderers = { "pretty": PrettyRenderer, "csv": CSVRenderer, "tab": TabRenderer, }
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/helpers/tabular.py
0.842378
0.22213
tabular.py
pypi
from collections.abc import Mapping import copy import os import re import yaml from ..helpers import xdg class UserSettingsParser(object): USER_CONFIG_DIR = xdg.get_config_home() USER_RC = None def get(self): r = copy.deepcopy(self.DEFAULT) if os.path.exists(self.USER_RC): with open(self.USER_RC) as y: custom = yaml.load(y, Loader=yaml.FullLoader) if custom: r = self.deep_update(r, custom) return r @classmethod def deep_update(cls, d, u): """ Updates a dictionary with another dictionary, only it goes deep. Stolen from http://stackoverflow.com/questions/3232943/ """ for k, v in u.items(): if isinstance(v, Mapping): r = cls.deep_update(d.get(k, {}), v) d[k] = r else: d[k] = u[k] return d @staticmethod def write(data): raise NotImplementedError() class Configuration(UserSettingsParser): """ A singleton configuration class that's smart enough to create a config out of defaults + yaml """ USER_RC = os.path.join(UserSettingsParser.USER_CONFIG_DIR, "rc") DEFAULT = { "authorisation": { "fetch": "", "fetch_aliases": {}, "create": "", "google_geocoding": "", }, "specification": { "af": 4, "description": "", "source": { "type": "area", "value": "WW", "requested": 50, }, "spread": None, "resolve_on_probe": None, "times": { "one-off": True, "interval": None, "start": None, "stop": None, }, "types": { "ping": { "packets": 3, "packet-interval": 1000, "size": 48, "include_probe_id": None, }, "traceroute": { "packets": 3, "size": 48, "protocol": "ICMP", "dont-fragment": False, "paris": 0, "first-hop": 1, "max-hops": 255, "port": 80, "destination-option-size": None, "hop-by-hop-option-size": None, "timeout": 4000, "response-timeout": None, "duplicate-timeout": None, }, "sslcert": { "port": 443, "hostname": None, }, "ntp": {"packets": 3, "timeout": 4000}, "dns": { "set-cd-bit": False, "set-do-bit": False, "protocol": "UDP", "query-class": "IN", "query-type": "A", "query-argument": None, "set-nsid-bit": False, "udp-payload-size": 512, "set-rd-bit": True, "retry": 0, "timeout": None, 
"tls": False, }, "http": { "header-bytes": 0, "version": "1.1", "method": "GET", "port": 80, "path": "/", "query-string": None, "user-agent": "RIPE Atlas: https://atlas.ripe.net/", "body-bytes": None, "timing-verbosity": 0, }, }, "tags": { "ipv4": { "ping": {"include": [], "exclude": []}, "traceroute": {"include": [], "exclude": []}, "dns": {"include": [], "exclude": []}, "sslcert": {"include": [], "exclude": []}, "http": {"include": [], "exclude": []}, "ntp": {"include": [], "exclude": []}, "all": {"include": ["system-ipv4-works"], "exclude": []}, }, "ipv6": { "ping": {"include": [], "exclude": []}, "traceroute": {"include": [], "exclude": []}, "dns": {"include": [], "exclude": []}, "sslcert": {"include": [], "exclude": []}, "http": {"include": [], "exclude": []}, "ntp": {"include": [], "exclude": []}, "all": {"include": ["system-ipv6-works"], "exclude": []}, }, }, }, "ripe-ncc": { "endpoint": "https://atlas.ripe.net", "stream-base-url": "https://atlas-stream.ripe.net", "version": 0, }, } @staticmethod def write(config): """ PyYaml is incapable of preserving comments, or even specifying them as an argument to `.dump()` (http://pyyaml.org/ticket/114), so we have to do some regex gymnastics here to make sure that the config file remains easy for n00bs to read. 
""" template = os.path.join(os.path.dirname(__file__), "templates", "base.yaml") authorisation = re.compile("^authorisation:$", re.MULTILINE) tags = re.compile("^ tags:$", re.MULTILINE) specification = re.compile("^specification:$", re.MULTILINE) ripe = re.compile("^ripe-ncc:$", re.MULTILINE) with open(template) as t: payload = str(t.read()).format( payload=yaml.dump(config, default_flow_style=False) ) payload = ripe.sub( "\n# Don't mess with these, or Bad Things may happen\n" "ripe-ncc:", payload, ) payload = authorisation.sub("# Authorisation\n" "authorisation:", payload) payload = specification.sub( "\n# Measurement Creation\n" "specification:", payload ) payload = tags.sub( " # Tags added to probes selection\n" " tags:", payload ) with open(Configuration.USER_RC, "w") as rc: rc.write(payload) def get(self): d = super().get() d["website-url"] = d["ripe-ncc"]["endpoint"] d["api-server"] = d["ripe-ncc"]["endpoint"].replace("https://", "") d["stream-base-url"] = d["ripe-ncc"]["stream-base-url"] return d class AliasesDB(UserSettingsParser): """ A singleton class to manage user aliases """ USER_RC = os.path.join(UserSettingsParser.USER_CONFIG_DIR, "aliases") DEFAULT = {"measurement": {}, "probe": {}} @staticmethod def write(aliases): if not os.path.exists(AliasesDB.USER_CONFIG_DIR): os.makedirs(AliasesDB.USER_CONFIG_DIR) payload = yaml.dump(aliases, default_flow_style=False) with open(AliasesDB.USER_RC, "w") as rc: rc.write(payload) conf = Configuration().get() aliases = AliasesDB().get()
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/settings/__init__.py
0.628293
0.237366
__init__.py
pypi
from ..ipdetails import IP from .base import Renderer as BaseRenderer class Renderer(BaseRenderer): RENDERS = [BaseRenderer.TYPE_TRACEROUTE] DEFAULT_RADIUS = 2 @staticmethod def add_arguments(parser): group = parser.add_argument_group( title="Optional arguments for traceroute_aspath renderer" ) group.add_argument( "--traceroute-aspath-radius", type=int, help="Number of different ASs starting from the end of the " "traceroute path. " "Default: {}.".format(Renderer.DEFAULT_RADIUS), metavar="RADIUS", default=Renderer.DEFAULT_RADIUS, ) def __init__(self, *args, **kwargs): BaseRenderer.__init__(self, *args, **kwargs) self.paths = {} # Number of different ASs starting from the end of the traceroute path. if "arguments" in kwargs: self.RADIUS = kwargs["arguments"].traceroute_aspath_radius else: self.RADIUS = Renderer.DEFAULT_RADIUS @staticmethod def _get_asns_for_output(asns, radius): asns_with_padding = [""] * radius + asns asns_with_padding = asns_with_padding[-radius:] return " ".join( [ "{:>8}".format("AS{}".format(asn) if asn else "") for asn in asns_with_padding ] ) def header(self, sample): return ( "For each traceroute path toward the target, the " "last {} ASNs will be shown\n\n".format(self.RADIUS) ) def on_result(self, result): ip_hops = [] for hop in result.hops: for packet in hop.packets: if packet.origin: ip_hops.append(packet.origin) break asns = [] # starting from the last hop's IP, get up to <RADIUS> ASNs for address in reversed(ip_hops): ip = IP(address) if ip.asn and ip.asn not in asns: asns.append(ip.asn) if len(asns) == self.RADIUS: break as_path = self._get_asns_for_output(list(reversed(asns)), self.RADIUS) if as_path not in self.paths: self.paths[as_path] = {} self.paths[as_path]["cnt"] = 0 self.paths[as_path]["responded"] = 0 self.paths[as_path]["cnt"] += 1 if result.destination_ip_responded: self.paths[as_path]["responded"] += 1 return "Probe #{:<5}: {}, {}completed\n".format( result.probe_id, as_path, "NOT " if not result.destination_ip_responded 
else "", ) def footer(self): s = "\nNumber of probes for each AS path:\n\n" for as_path in self.paths: s += " {}: {} probe{}, {} completed\n".format( as_path, self.paths[as_path]["cnt"], "s" if self.paths[as_path]["cnt"] > 1 else "", self.paths[as_path]["responded"], ) return s
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/renderers/traceroute_aspath.py
0.63409
0.168241
traceroute_aspath.py
pypi
import importlib import os import pkgutil import sys from ..exceptions import RipeAtlasToolsException from ..helpers import xdg class Renderer(object): TYPE_PING = "ping" TYPE_TRACEROUTE = "traceroute" TYPE_DNS = "dns" TYPE_SSLCERT = "sslcert" TYPE_HTTP = "http" TYPE_NTP = "ntp" RENDERS = () def __init__(self, *args, **kwargs): """ If "arguments" is in kwargs it can be used to gather renderer's optional arguments which have been passed via CLI. See also add_arguments(). """ self.show_header = True self.show_footer = True if "arguments" in kwargs: self.show_header = kwargs["arguments"].show_header self.show_footer = kwargs["arguments"].show_footer @classmethod def get_available(cls): """ Return a list of renderers available to be used. """ paths = [os.path.dirname(__file__)] if "HOME" in os.environ: path = xdg.get_config_home() sys.path.append(path) paths += [os.path.join(path, "renderers")] names = [] for _, module_name, _ in pkgutil.iter_modules(paths): if module_name == "base": continue # Check that we can actually use this renderer, otherwise drop it try: cls.get_renderer_by_name(module_name) except Exception: continue names.append(module_name) return names @staticmethod def add_common_arguments(parser): group = parser.add_argument_group(title="Optional arguments for all renderers") group.add_argument( "--no-header", dest="show_header", action="store_false", help="Don't show a header/title before rendering results", ) group.add_argument( "--no-footer", dest="show_footer", action="store_false", help="Don't show a footer/summary after rendering results", ) @staticmethod def add_arguments_for_available_renderers(parser): Renderer.add_common_arguments(parser) for renderer_name in Renderer.get_available(): renderer_cls = Renderer.get_renderer_by_name(renderer_name) renderer_cls.add_arguments(parser) @staticmethod def render_template(template, **kwargs): """ A crude templating engine. 
""" template = os.path.join(os.path.dirname(__file__), "templates", template) with open(template) as f: return str(f.read()).format(**kwargs) @classmethod def get_renderer(cls, name=None, kind=None): """ Using the name if you've asked for it specifically, or attempting to guess the appropriate renderer based on the kind of measurement, this will return a Renderer subclass or None if nothing can be found. """ renderer = None if name: renderer = cls.get_renderer_by_name(name) if not renderer and kind: renderer = cls.get_renderer_by_kind(kind) if kind: cls._test_renderer_accepts_kind(renderer, kind) return renderer @classmethod def get_renderer_by_name(cls, name): error_message = f'The renderer you selected, "{name}" could not be found.' # User-defined, user-supplied r = cls.import_renderer("renderers", name) if not r: r = cls.import_renderer("ripe.atlas.tools.renderers", name) if not r: raise RipeAtlasToolsException(error_message) return r @classmethod def get_renderer_by_kind(cls, kind): error_message = ( f'A default renderer for "{kind}" measurements could not be found.' ) r = cls.import_renderer("ripe.atlas.tools.renderers", kind) if not r: raise RipeAtlasToolsException(error_message) return r @staticmethod def import_renderer(package, name): """ Return the Renderer class from package.name, or None if either package or package.name don't exist. """ full_name = f"{package}.{name}" try: spec = importlib.util.find_spec(full_name) except ModuleNotFoundError: return if not spec: return return getattr(importlib.import_module(full_name), "Renderer") @staticmethod def add_arguments(parser): """ Add renderer's optional arguments here. Suggested format: group = parser.add_argument_group( title="Optional arguments for XXX renderer" ) group.add_argument( ... ) """ pass def render(self, results, sample=None): """ Render the given iterable of RIPE Atlas JSON results. 
""" # Put aggregated and unaggregated results in the same format normalized = dict(results) if isinstance(results, dict) else {"": results} header_shown = False last_key = None for key, results in normalized.items(): for sagan in results: # Possibly show render header if self.show_header and not header_shown: print(self.header(sagan), end="") header_shown = True if key: indent = " " if key != last_key: # Show aggregation group header print("\n" + key) last_key = key else: indent = "" line = Result(self.on_result(sagan), sagan.probe_id) print(indent + line, end="") if self.show_footer: print(self.footer(), end="") def header(self, sample): """ Override this to add a header. `sample` is a single parsed result from the result set (probably the first one). It can be used to infer metadata about the measurement without having to do an extra API call. """ return "" def footer(self): """ Override this to add a footer. To provide a summary here, statistics should be gathered in the `on_result` callback, ideally without storing all results in memory. """ return "" @staticmethod def _test_renderer_accepts_kind(renderer, kind): if kind not in renderer.RENDERS: raise RipeAtlasToolsException( "The renderer selected does not appear to support measurements " 'of type "{}"'.format(kind) ) def on_result(self, result): """ This must be defined in the subclass, and must return a string, even if that string is "". """ raise NotImplementedError() class Result(str): """ A string-like object that we can use to render results, but that contains enough information to be used by the aggregators if need be. """ def __new__(cls, value, probe_id): obj = str.__new__(cls, value) obj.probe_id = probe_id return obj
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/renderers/base.py
0.544075
0.150809
base.py
pypi
from ..helpers.sanitisers import sanitise
from .base import Renderer as BaseRenderer


class Renderer(BaseRenderer):
    """
    Aggregating ping renderer: accumulates per-result statistics and renders
    a summary from the ``reports/aggregate_ping.txt`` template.  This started
    life as a stub example; improvements are welcome via pull request.
    """

    RENDERS = [BaseRenderer.TYPE_PING]

    def __init__(self):
        # Aggregated statistics, accumulated as results are collected.
        self.target = ""
        self.packet_loss = 0
        self.sent_packets = 0
        self.received_packets = 0
        self.rtts = []  # every packet rtt seen (unset rtts coerced to 0)
        self.rtts_min = []  # per-result minimum rtts
        self.rtts_max = []  # per-result maximum rtts
        # Dispatch table so collect_min_max_rtts() can pick a bucket by name.
        self.rtt_types_map = {"min": self.rtts_min, "max": self.rtts_max}

    def header(self):
        return "Collecting results...\n"

    def additional(self, results):
        """
        Collect stats for all of ``results`` and render the summary template.
        """
        self.collect_stats(results)
        self.packet_loss = self.calculate_loss()
        return self.render(
            "reports/aggregate_ping.txt",
            target=sanitise(self.target),
            sent=self.sent_packets,
            received=self.received_packets,
            packet_loss=self.packet_loss,
            min=min(self.rtts_min),
            median=self.median(),
            mean=self.mean(),
            max=max(self.rtts_max)
        )

    def collect_stats(self, results):
        """
        Calculates, stores and collects all stats we want from the given
        results.
        """
        for result in results:
            self.set_target(result)
            self.sent_packets += result.packets_sent
            self.received_packets += result.packets_received
            self.collect_min_max_rtts("min", result.rtt_min)
            self.collect_min_max_rtts("max", result.rtt_max)
            self.collect_packets_rtt(result.packets)

    def set_target(self, result):
        """Sets the target of the measurement if not set."""
        if not self.target:
            self.target = result.destination_name

    def collect_min_max_rtts(self, rtt_type, rtt):
        """
        Stores the given rtt in the corresponding list (min/max), coercing
        an unset (None) rtt to 0.
        """
        self.rtt_types_map[rtt_type].append(rtt or 0)

    def collect_packets_rtt(self, packets):
        """
        Collects all the rtts of the given packets and stores them in our
        rtts list; packets without an rtt count as 0.
        """
        for packet in packets:
            self.rtts.append(packet.rtt or 0)

    def calculate_loss(self):
        """Calculates the total loss between received and sent packets."""
        if not self.sent_packets:
            return 0
        return (1 - float(self.received_packets) / self.sent_packets) * 100

    def mean(self):
        """Calculates the mean of the collected rtts."""
        return round(float(sum(self.rtts)) / max(len(self.rtts), 1), 3)

    def median(self):
        """Calculates the median of the collected rtts (0 when empty)."""
        sorted_rtts = sorted(self.rtts)
        if not sorted_rtts:
            # Previously this raised IndexError when no rtts were collected.
            return 0
        index = (len(sorted_rtts) - 1) // 2
        if len(sorted_rtts) % 2:
            return sorted_rtts[index]
        return (sorted_rtts[index] + sorted_rtts[index + 1]) / 2.0

    def on_result(self, result):
        return ""
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/renderers/aggregate_ping.py
0.875721
0.30978
aggregate_ping.py
pypi
from ..helpers.sanitisers import sanitise
from .base import Renderer as BaseRenderer


class Renderer(BaseRenderer):
    """
    Ping renderer: prints one line per result and keeps running statistics
    so that footer() can render an aggregate summary from the
    ``reports/aggregate_ping.txt`` template.
    """

    RENDERS = [BaseRenderer.TYPE_PING]

    def __init__(self, **kwargs):
        BaseRenderer.__init__(self, **kwargs)
        # Aggregated statistics, accumulated across on_result() calls.
        self.target = ""
        self.packet_loss = 0
        self.sent_packets = 0
        self.received_packets = 0
        self.rtts = []  # every packet rtt seen (unset rtts coerced to 0)
        self.rtts_min = []  # per-result minimum rtts
        self.rtts_max = []  # per-result maximum rtts
        # Dispatch table so collect_min_max_rtts() can pick a bucket by name.
        self.rtt_types_map = {"min": self.rtts_min, "max": self.rtts_max}

    def collect_stats(self, result):
        """
        Calculates, stores and collects all stats we want from the given
        result.
        """
        if not self.target:
            self.target = result.destination_name
        self.sent_packets += result.packets_sent
        self.received_packets += result.packets_received
        self.collect_min_max_rtts("min", result.rtt_min)
        self.collect_min_max_rtts("max", result.rtt_max)
        self.collect_packets_rtt(result.packets)

    def collect_min_max_rtts(self, rtt_type, rtt):
        """
        Stores the given rtt in the corresponding list (min/max), coercing
        an unset (None) rtt to 0.
        """
        self.rtt_types_map[rtt_type].append(rtt or 0)

    def collect_packets_rtt(self, packets):
        """
        Collects all the rtts of the given packets and stores them in our
        rtts list; packets without an rtt count as 0.
        """
        for packet in packets:
            self.rtts.append(packet.rtt or 0)

    def calculate_loss(self):
        """Calculates the total loss between received and sent packets."""
        if not self.sent_packets:
            return 0
        return (1 - float(self.received_packets) / self.sent_packets) * 100

    def mean(self):
        """Calculates the mean of the collected rtts."""
        return round(float(sum(self.rtts)) / max(len(self.rtts), 1), 3)

    def median(self):
        """Calculates the median of the collected rtts (0 when empty)."""
        sorted_rtts = sorted(self.rtts)
        if not sorted_rtts:
            return 0
        index = (len(sorted_rtts) - 1) // 2
        if len(sorted_rtts) % 2:
            return sorted_rtts[index]
        return (sorted_rtts[index] + sorted_rtts[index + 1]) / 2.0

    def on_result(self, result):
        """Render one result line, updating the aggregate stats on the way."""
        packets = result.packets

        if not packets:
            return "No packets found\n"

        self.collect_stats(result)

        # Because the origin value is more reliable as "from" in v4 and as
        # "packet.source_address" in v6.
        origin = result.origin
        if ":" in origin:
            origin = packets[0].source_address

        times = ", ".join(str(p.rtt) + " ms" for p in packets)

        return (
            f"{result.packet_size} bytes from {result.destination_address} via "
            f"probe #{result.probe_id} ({origin})"
            f": ttl={packets[0].ttl} times={times}\n"
        )

    def header(self, sample):
        # If the API echoed the address back as the name, the name was
        # resolved on the probe rather than on the server.
        resolved_on = (
            "server"
            if sample.destination_address == sample.destination_name
            else "probe"
        )
        return f"PING {sample.destination_name} (resolved on {resolved_on})\n"

    def footer(self):
        """Render the aggregate summary; empty when nothing was collected."""
        if not self.sent_packets:
            return ""
        self.packet_loss = self.calculate_loss()
        return self.render_template(
            "reports/aggregate_ping.txt",
            target=sanitise(self.target),
            sent=self.sent_packets,
            received=self.received_packets,
            packet_loss=self.packet_loss,
            min=min(self.rtts_min),
            median=self.median(),
            mean=self.mean(),
            max=max(self.rtts_max),
        )
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/renderers/ping.py
0.859369
0.331918
ping.py
pypi
from ripe.atlas.cousteau import Measurement
from ripe.atlas.cousteau.exceptions import APIResponseError

from .base import Command as BaseCommand, MetaDataMixin
from ..exceptions import RipeAtlasToolsException
from ..helpers.colours import colourise
from ..helpers.sanitisers import sanitise
from ..helpers.validators import ArgumentType
from ..settings import conf


class Command(MetaDataMixin, BaseCommand):

    NAME = "measurement-info"
    DESCRIPTION = "Return the meta data for one measurement"

    def add_arguments(self):
        self.parser.add_argument(
            "id", type=ArgumentType.msm_id_or_name(), help="The measurement id or alias"
        )

    def run(self):
        """Fetch the measurement and render its common + type-specific data."""
        try:
            measurement = Measurement(
                server=conf["api-server"],
                id=self.arguments.id,
                user_agent=self.user_agent,
            )
        except APIResponseError:
            raise RipeAtlasToolsException("That measurement does not appear to exist")

        self.render_basic(measurement)
        # Dispatch to the type-specific renderer, e.g. render_ping().
        getattr(self, "render_{}".format(measurement.type.lower()))(measurement)

    @classmethod
    def render_basic(cls, measurement):
        """Render the fields shared by all measurement types."""
        cls._render(
            measurement,
            (
                ("id", "ID"),
                (
                    "id",
                    "URL",
                    # Bug fix: the id was not interpolated into the URL; the
                    # f-string previously emitted the literal text
                    # "/measurements/id/" (missing braces around `id`).
                    lambda id: colourise(
                        f"{conf['website-url']}/measurements/{id}/", "cyan"
                    ),
                ),
                ("type", "Type", cls._prettify_type),
                ("status", "Status"),
                ("description", "Description", sanitise),
                ("af", "Address Family"),
                ("is_public", "Public?", cls._prettify_boolean),
                ("is_oneoff", "One-off?", cls._prettify_boolean),
                ("target", "Target Name", sanitise),
                ("target_address", "Target Address", sanitise),
                ("target_asn", "Target ASN"),
                ("interval", "Interval"),
                ("spread", "Spread"),
                ("creation_time", "Created", cls._prettify_time),
                ("start_time", "Started", cls._prettify_time),
                ("stop_time", "Stopped", cls._prettify_time),
                ("probes_requested", "Probes Requested"),
                ("probes_scheduled", "Probes Scheduled"),
                ("probes_currently_involved", "Probes Involved"),
                ("participant_count", "Participant Count"),
                ("is_all_scheduled", "Fully Scheduled?", cls._prettify_boolean),
                ("resolved_ips", "Resolved IPs", lambda _: ", ".join(_)),
                ("resolve_on_probe", "Resolve on the Probe", cls._prettify_boolean),
            ),
        )

    @classmethod
    def render_ping(cls, measurement):
        cls._render(
            measurement,
            (
                ("packets", "Packets"),
                ("size", "Size"),
            ),
        )

    @classmethod
    def render_traceroute(cls, measurement):
        cls._render(
            measurement,
            (
                ("packets", "Packets"),
                ("protocol", "Protocol"),
                ("dont_fragment", "Don't Fragment", cls._prettify_boolean),
                ("paris", "Paris"),
                ("first_hop", "First Hop"),
                ("max_hops", "Maximum Hops"),
                ("timeout", "Timeout"),
                ("size", "Size"),
                ("destination_option_size", "Destination Option Size"),
                ("hop_by_hop_option_size", "Hop-by-hop Option Size"),
                ("gap_limit", "Gap Limit"),
            ),
        )

    @classmethod
    def render_dns(cls, measurement):
        cls._render(
            measurement,
            (
                ("query", "Query", cls._prettify_query),
                ("retry", "Retry Times"),
                ("include_qbuf", "Include the Qbuf?", cls._prettify_boolean),
                ("include_abuf", "Include the Abuf?", cls._prettify_boolean),
                ("protocol", "Protocol"),
                ("prepend_probe_id", "Prepend the Probe ID?"),
                ("udp_payload_size", "UDP Payload Size"),
                (
                    "use_probe_resolver",
                    "Use the Probe's Resolver?",
                    cls._prettify_boolean,
                ),
                ("set_do_bit", "Set the DO Bit?", cls._prettify_boolean),
                ("set_nsid_bit", "Set the NSID Bit?", cls._prettify_boolean),
                ("set_rd_bit", "Set the RD Bit?", cls._prettify_boolean),
                ("set_cd_bit", "Set the CD Bit?", cls._prettify_boolean),
            ),
        )

    @classmethod
    def render_sslcert(cls, measurement):
        cls._render(measurement, (("port", "Port"),))

    @classmethod
    def render_http(cls, measurement):
        cls._render(
            measurement,
            (
                ("header_bytes", "Header Bytes"),
                ("version", "Version"),
                ("method", "Method"),
                ("port", "Port"),
                ("path", "Path", sanitise),
                ("query_string", "Query String", sanitise),
                ("user_agent", "User-Agent"),
                ("max_bytes_read", "Body Bytes"),
            ),
        )

        # The two extended-timing flags map onto verbosity levels 1 and 2.
        timing_verbosity = 0
        if measurement.meta_data.get("extended_timing"):
            timing_verbosity = 1
            if measurement.meta_data.get("more_extended_timing"):
                timing_verbosity = 2
        cls._render_line("Timing Verbosity", timing_verbosity)

    @classmethod
    def render_ntp(cls, measurement):
        cls._render(
            measurement,
            (
                ("packets", "Packets"),
                ("timeout", "Timeout"),
            ),
        )

    @staticmethod
    def _prettify_type(kind):
        """Return a colourised human-readable name for a measurement type."""
        types = {
            "ping": "Ping",
            "traceroute": "Traceroute",
            "dns": "DNS",
            "sslcert": "SSL Certificate",
            "http": "HTTP",
            "ntp": "NTP",
        }
        if kind in types:
            return colourise(colourise(types[kind], "bold"), "blue")
        return colourise("Unknown", "red")

    @staticmethod
    def _prettify_query(query):
        return sanitise(
            "{} {} {}".format(query["class"], query["type"], query["value"])
        )

    @classmethod
    def _render(cls, measurement, keys):
        """
        Render one line per (property, label[, formatter]) tuple.  The
        formatter is skipped for missing ("-") values.
        """
        for prop in keys:
            value = cls._get_measurement_property(measurement, prop[0])
            if value != "-" and len(prop) == 3:
                value = prop[2](value)
            cls._render_line(prop[1], value)

    @classmethod
    def _get_measurement_property(cls, measurement, property_name):
        """Look up an attribute, falling back to meta_data, then to "-"."""
        value = getattr(measurement, property_name, None)
        if value is None and property_name in measurement.meta_data:
            value = measurement.meta_data[property_name]
        if value is None:
            value = "-"
        return value
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/measurement_info.py
0.741861
0.289284
measurement_info.py
pypi
import itertools
import sys
from typing import Any, Dict, List, Mapping, Tuple

import requests
from ripe.atlas.cousteau import ProbeRequest

from ..exceptions import RipeAtlasToolsException
from ..helpers import tabular
from ..helpers.sanitisers import sanitise
from ..helpers.validators import ArgumentType
from ..settings import conf
from .base import Command as BaseCommand


class Command(BaseCommand):

    NAME = "probe-search"

    MAX_PAGE_SIZE = 500  # Request chunks of this size or smaller

    DESCRIPTION = (
        "Fetch and print probes fulfilling specified criteria based on "
        "given filters"
    )

    # Column layout used by the tabular renderers.
    COLUMNS: Dict[str, tabular.ColumnDef] = {
        "id": {"align": ">", "width": 5},
        "asn_v4": {"align": ">", "width": 6},
        "asn_v6": {"align": ">", "width": 6},
        "country": {"align": "^", "width": 7},
        "status": {"align": "^", "width": 15},
        "prefix_v4": {"align": ">", "width": 18},
        "prefix_v6": {"align": ">", "width": 18},
        "coordinates": {"align": "^", "width": 19},
        "is_public": {"align": "^", "width": 9},
        "description": {"align": "^", "width": 30},
        "address_v4": {"align": ">", "width": 15},
        "address_v6": {"align": ">", "width": 39},
        "is_anchor": {"align": "^", "width": 9},
    }

    def add_arguments(self) -> None:
        """Adds all commands line arguments for this command."""
        asn = self.parser.add_argument_group("ASN")
        asn.add_argument(
            "--asn",
            type=int,
            help="Probes in IPv4 or IPv6 prefixes announced by this ASN",
        )
        asn.add_argument(
            # Typo fix: "prexfixes" -> "prefixes"
            "--asnv4", type=int, help="Probes in IPv4 prefixes announced by this ASN"
        )
        asn.add_argument(
            "--asnv6", type=int, help="Probes in IPv6 prefixes announced by this ASN"
        )

        prefix = self.parser.add_argument_group("Prefix")
        prefix.add_argument(
            "--prefix",
            type=str,
            help="Probes with addresses in this IPv4 or IPv6 CIDR prefix",
        )
        prefix.add_argument(
            "--prefixv4", type=str, help="Probes with addresses in this IPv4 prefix"
        )
        prefix.add_argument(
            "--prefixv6", type=str, help="Probes with addresses in this IPv6 prefix"
        )

        area = self.parser.add_argument_group("Area")
        geo_location = area.add_mutually_exclusive_group()
        geo_location.add_argument(
            "--location",
            type=str,
            help="The location of probes as a string i.e. 'Amsterdam'",
        )
        geo_location.add_argument(
            "--center",
            type=str,
            help="Location as <lat>,<lon>-string, i.e. '48.45,9.16'. "
            "Note: use --center=-5,10 (no space) to allow for negative latitudes",
        )
        geo_location.add_argument(
            "--country", type=str, help="The country code of probes."
        )
        area.add_argument(
            "--radius",
            type=int,
            default=15,
            help="Radius in km from specified center/point. Default: 15",
        )

        self.parser.add_argument(
            "--tag",
            type=ArgumentType.tag,
            action="append",
            metavar="TAG",
            help="Include only probes that are marked with these tags. "
            "Use --tag multiple times to filter on the basis of more "
            "than one tag. "
            "Example: --tag system-ipv6-works --tag system-ipv4-works",
            dest="tags",
        )
        self.parser.add_argument(
            "--limit",
            type=int,
            default=25,
            help="Return at most this number of probes. Default: 25",
        )
        self.parser.add_argument(
            "--all",
            action="store_true",
            help="Fetch all probes; takes a long time!",
        )
        self.parser.add_argument(
            "--status",
            type=int,
            choices=[0, 1, 2, 3],
            help=(
                "Probe's connection status [0 - Never Connected, "
                "1 - Connected, 2 - Disconnected, 3 - Abandoned]"
            ),
        )
        self.parser.add_argument(
            "--auth",
            type=str,
            default=conf["authorisation"]["google_geocoding"],
            help=(
                "Google Geocoding API key to be "
                "used to perform --location search."
            ),
        )
        tabular.add_argument_group(self.parser, self.COLUMNS.keys())

    def run(self) -> None:
        """Fetch matching probes and print them via the chosen renderer."""
        if not self.arguments.field:
            self.arguments.field = [
                "id",
                "asn_v4",
                "asn_v6",
                "country",
                "status",
            ]

        if self.arguments.all:
            self.arguments.limit = sys.maxsize

        filters = self._get_filters()
        request_fields = self._get_request_fields()

        probes = ProbeRequest(
            server=conf["api-server"],
            return_objects=True,
            user_agent=self.user_agent,
            fields=",".join(request_fields),
            page_size=min(self.MAX_PAGE_SIZE, self.arguments.limit),
            **filters
        )

        # Stop iterating the paged API request once the limit is reached.
        truncated_probes = itertools.islice(probes, self.arguments.limit)

        renderer = tabular.renderers[self.arguments.format]
        rows = [self._get_row(m) for m in truncated_probes]
        for line in renderer(
            rows=rows,
            total_count=probes.total_count,
            columns=dict((c, self.COLUMNS[c]) for c in self.arguments.field),
            filters=filters,
            arguments=self.arguments,
        ):
            print(line)

    def _get_filters(self) -> Dict[str, str]:
        """
        Get the request filters for sending to the API.
        """
        if self.arguments.all:
            return {}

        args: Dict[str, Any] = {}

        if any([self.arguments.asn, self.arguments.asnv4, self.arguments.asnv6]):
            args.update(self._clean_asn())
        if any(
            [
                self.arguments.prefix,
                self.arguments.prefixv4,
                self.arguments.prefixv6,
            ]
        ):
            args.update(self._clean_prefix())
        if self.arguments.location:
            args.update(self._clean_location())
        if self.arguments.center:
            args.update(self._clean_center())
        if self.arguments.country:
            args.update(self._clean_country_code())
        if self.arguments.status is not None:
            args.update({"status": self.arguments.status})
        if self.arguments.tags:
            args.update({"tags": ",".join(self.arguments.tags)})

        return args

    def _clean_asn(self) -> Mapping[str, int]:
        """Make sure ASN arguments don't conflict and make sense."""
        asn = self.arguments.asn
        asnv4 = self.arguments.asnv4
        asnv6 = self.arguments.asnv6
        if asn and (asnv4 or asnv6):
            exc_log = (
                "Specifying argument --asn together with --asnv4/--asnv6 "
                "doesn't make sense"
            )
            raise RipeAtlasToolsException(exc_log)
        if asn:
            return {"asn": asn}

        asn_args = {}
        if asnv4:
            asn_args["asn_v4"] = asnv4
        if asnv6:
            asn_args["asn_v6"] = asnv6

        return asn_args

    def _clean_prefix(self) -> Dict[str, str]:
        """Make sure prefix arguments don't conflict and make sense."""
        prefix = self.arguments.prefix
        prefixv4 = self.arguments.prefixv4
        prefixv6 = self.arguments.prefixv6
        if prefix and (prefixv4 or prefixv6):
            exc_log = (
                "Specifying argument --prefix together with "
                "--prefixv4/--prefixv6 doesn't make sense"
            )
            raise RipeAtlasToolsException(exc_log)
        if prefix:
            return {"prefix": prefix}

        prefix_args = {}
        if prefixv4:
            prefix_args["prefix_v4"] = prefixv4
        if prefixv6:
            prefix_args["prefix_v6"] = prefixv6

        return prefix_args

    def _clean_location(self) -> Dict[str, Any]:
        """Make sure the location argument is sane."""
        if not self.arguments.auth:
            raise RipeAtlasToolsException(
                "--location requires a Google Geocoding API key specified with "
                "--auth or configure command (authorisation.google_geocoding)"
            )
        lat, lng = self.location2degrees()
        if self.arguments.radius:
            location_args = {
                "radius": "{0},{1}:{2}".format(lat, lng, self.arguments.radius)
            }
        else:
            location_args = {"latitude": lat, "longitude": lng}

        return location_args

    def location2degrees(self) -> Tuple[str, str]:
        """Fetches degrees based on the given location."""
        # Typo fix in the user-facing message: "occured" -> "occurred"
        error_log = (
            "The following error occurred while trying to fetch lat/lon "
            "for location <{}>:\n\n{}"
        )
        google_api_url = "https://maps.googleapis.com/maps/api/geocode/json"
        try:
            result = requests.get(
                google_api_url,
                params={
                    "key": self.arguments.auth,
                    "address": self.arguments.location,
                },
            )
        except (
            requests.ConnectionError,
            requests.HTTPError,
            requests.Timeout,
        ) as e:
            error_log = error_log.format(self.arguments.location, e)
            raise RipeAtlasToolsException(error_log)

        data = result.json()

        if "error_message" in data:
            error = error_log.format(self.arguments.location, data["error_message"])
            raise RipeAtlasToolsException(error)

        try:
            lat = data["results"][0]["geometry"]["location"]["lat"]
            lng = data["results"][0]["geometry"]["location"]["lng"]
        except (KeyError, IndexError) as e:
            error = error_log.format(self.arguments.location, e)
            raise RipeAtlasToolsException(error)

        return str(lat), str(lng)

    def _clean_center(self) -> Dict[str, Any]:
        """Make sure the center argument is sane."""
        try:
            lat, lng = self.arguments.center.split(",")
        except ValueError:
            raise RipeAtlasToolsException(
                "Point argument should be in <lat,lng> format."
            )

        if self.arguments.radius:
            center_args = {
                "radius": "{0},{1}:{2}".format(lat, lng, self.arguments.radius)
            }
        else:
            center_args = {"latitude": lat, "longitude": lng}

        return center_args

    def _clean_country_code(self) -> Dict[str, str]:
        """Make sure the country_code argument is sane."""
        return {"country_code": self.arguments.country}

    def _get_row(self, probe) -> tabular.RowDef:
        """Build the renderer row (values + colour) for one probe."""
        r = {}

        for field in self.arguments.field + self.arguments.aggregate_by:
            if field == "country":
                r[field] = (probe.country_code or "").lower()
            elif field in ("asn_v4", "asn_v6"):
                r[field] = getattr(probe, field) or None
            elif field == "description":
                description = sanitise(probe.description) or None
                r[field] = description
            elif field == "coordinates":
                if probe.geometry and probe.geometry["coordinates"]:
                    # GeoJSON stores (lng, lat); we display "lat lng".
                    lng, lat = probe.geometry["coordinates"]
                    r[field] = "{:7.4f} {:8.4f}".format(lat, lng)
                else:
                    r[field] = None
            elif field in ("is_public", "is_anchor"):
                if getattr(probe, field):
                    r[field] = "\u2714"  # Check mark
                else:
                    r[field] = "\u2718"  # X
            else:
                r[field] = sanitise(getattr(probe, field))

        return {"values": r, "colour": self._get_colour_from_status(probe.status)}

    @staticmethod
    def _get_colour_from_status(status: str) -> str:
        if status == "Connected":
            return "green"
        if status == "Disconnected":
            return "yellow"
        if status == "Abandoned":
            return "red"
        return "white"

    def _get_request_fields(self) -> List[str]:
        """
        Translate display fields into the field names the API expects
        (country -> country_code, coordinates -> geometry) and make sure
        status is always fetched for row colouring.
        """
        request_fields = list(self.arguments.field)
        for field in self.arguments.aggregate_by:
            if field not in request_fields:
                request_fields.append(field)
        if "country" in request_fields:
            request_fields.remove("country")
            request_fields.append("country_code")
        if "status" not in request_fields:
            request_fields.append("status")
        if "coordinates" in request_fields:
            request_fields.remove("coordinates")
            request_fields.append("geometry")
        return request_fields
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/probe_search.py
0.558809
0.18628
probe_search.py
pypi
from ripe.atlas.cousteau import Probe
from ripe.atlas.cousteau.exceptions import APIResponseError

from .base import Command as BaseCommand, MetaDataMixin
from ..exceptions import RipeAtlasToolsException
from ..helpers.colours import colourise
from ..helpers.sanitisers import sanitise
from ..helpers.validators import ArgumentType
from ..settings import conf


class Command(MetaDataMixin, BaseCommand):

    NAME = "probe-info"
    DESCRIPTION = "Return the meta data for one probe"

    def add_arguments(self):
        self.parser.add_argument(
            "id", type=ArgumentType.probe_id_or_name(), help="The probe id or alias"
        )

    def run(self):
        """Fetch the probe and pretty-print its meta data, line by line."""
        try:
            probe = Probe(
                server=conf["api-server"],
                id=self.arguments.id,
                user_agent=self.user_agent,
            )
        except APIResponseError:
            raise RipeAtlasToolsException("That probe does not appear to exist")

        # (attribute-name, label[, formatter]) triples; the formatter is
        # only applied to non-None values.
        keys = (
            ("id", "ID"),
            (
                "id",
                "URL",
                lambda id: colourise(f"{conf['website-url']}/probes/{id}/", "cyan"),
            ),
            ("is_public", "Public?", self._prettify_boolean),
            ("is_anchor", "Anchor?", self._prettify_boolean),
            ("country_code", "Country"),
            ("description", "Description", sanitise),
            ("asn_v4", "ASN (IPv4)"),
            ("asn_v6", "ASN (IPv6)"),
            ("address_v4", "Address (IPv4)"),
            ("address_v6", "Address (IPv6)"),
            ("prefix_v4", "Prefix (IPv4)"),
            ("prefix_v6", "Prefix (IPv6)"),
            ("geometry", "Coordinates", self._prettify_coordinates),
            ("status", "Status"),
        )
        for key in keys:
            value = getattr(probe, key[0])
            if value is None:
                value = "-"
            elif len(key) == 3:
                value = key[2](value)
            self._render_line(key[1], value)

        print(colourise("Tags", "bold"))
        for tag in probe.tags:
            print(" {}".format(tag["slug"]))

    @staticmethod
    def _prettify_coordinates(geometry):
        """
        Return "lat,lng" for the given GeoJSON-style dict, or "-" when no
        coordinates are available.  (Previously the missing-coordinates case
        fell through and returned None, so the literal string "None" was
        printed instead of the "-" placeholder used for other empty fields.)
        """
        if geometry and "coordinates" in geometry and geometry["coordinates"]:
            return "{},{}".format(
                geometry["coordinates"][1], geometry["coordinates"][0]
            )
        return "-"
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/probe_info.py
0.627152
0.178562
probe_info.py
pypi
from ripe.atlas.cousteau import Measurement
from ripe.atlas.cousteau.exceptions import APIResponseError

from ..exceptions import RipeAtlasToolsException
from ..renderers import Renderer
from ..settings import conf
from ..streaming import Stream
from .base import Command as BaseCommand
from ..helpers.validators import ArgumentType


class Command(BaseCommand):

    NAME = "stream"

    DESCRIPTION = "Output the results of a public measurement as they become available"
    EXTRA_DESCRIPTION = "Streaming of non-public measurements is not supported."
    URLS = {
        "detail": "/api/v2/measurements/{0}.json",
    }

    def add_arguments(self):
        self.parser.add_argument(
            "measurement_id",
            type=ArgumentType.msm_id_or_name(),
            help="The measurement id or alias you want streamed",
        )
        self.parser.add_argument(
            "--limit",
            type=int,
            help="The maximum number of results you want to stream",
        )
        self.parser.add_argument(
            "--renderer",
            choices=Renderer.get_available(),
            help="The renderer you want to use. If this isn't defined, an "
            "appropriate renderer will be selected.",
        )
        self.parser.add_argument(
            "--timeout",
            type=float,
            help="Stop streaming after this number of seconds",
        )
        Renderer.add_arguments_for_available_renderers(self.parser)

    def run(self):
        """Look up the measurement, then stream and render its results."""
        try:
            # Consistency fix: pass the configured API server, as every
            # sibling command (report, measurement-info, probe-info) does;
            # previously a configured "api-server" was ignored here.
            measurement = Measurement(
                server=conf["api-server"],
                id=self.arguments.measurement_id,
                user_agent=self.user_agent,
            )
        except APIResponseError as e:
            raise RipeAtlasToolsException(e.args[0])

        self.ok("Connecting to stream...")

        stream = Stream(
            self.arguments.measurement_id,
            capture_limit=self.arguments.limit,
            timeout=self.arguments.timeout,
        )
        renderer = Renderer.get_renderer(
            name=self.arguments.renderer, kind=measurement.type.lower()
        )(arguments=self.arguments)
        renderer.render(stream)

        self.ok("Disconnected from stream")
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/stream.py
0.677367
0.206174
stream.py
pypi
import os
import sys

try:
    import ujson as json
except ImportError:
    import json

import itertools

from ripe.atlas.sagan import Result
from ripe.atlas.cousteau import AtlasLatestRequest, AtlasResultsRequest

from ..aggregators import RangeKeyAggregator, ValueKeyAggregator, aggregate
from ..exceptions import RipeAtlasToolsException
from ..helpers.validators import ArgumentType
from ..renderers import Renderer
from .base import Command as BaseCommand
from ..filters import SaganSet, FilterFactory, filter_results
from ..settings import conf


class Command(BaseCommand):

    NAME = "report"

    DESCRIPTION = (
        "Report the results of an existing measurement from the API, "
        "a file or standard input"
    )
    EXTRA_DESCRIPTION = (
        "Examples:\n"
        " ripe-atlas report 1001 --probes 157,10006\n"
        " ripe-atlas report --from-file results.json\n"
        " cat results.json | ripe-atlas report --aggregate-by prefix_v4\n"
    )

    # name -> [sagan attribute path, aggregator class(, ranges)]
    AGGREGATORS = {
        "country": ["probe.country_code", ValueKeyAggregator],
        "rtt-median": [
            "rtt_median",
            RangeKeyAggregator,
            [10, 20, 30, 40, 50, 100, 200, 300],
        ],
        "status": ["probe.status", ValueKeyAggregator],
        "asn_v4": ["probe.asn_v4", ValueKeyAggregator],
        "asn_v6": ["probe.asn_v6", ValueKeyAggregator],
        "prefix_v4": ["probe.prefix_v4", ValueKeyAggregator],
        "prefix_v6": ["probe.prefix_v6", ValueKeyAggregator],
    }

    def add_arguments(self):
        self.parser.add_argument(
            "measurement_id",
            type=ArgumentType.msm_id_or_name(),
            help="The measurement ID or alias to fetch from the results API. "
            "(Conflicts with the --from-file option)",
            nargs="?",
        )
        self.parser.add_argument(
            "--auth",
            type=str,
            choices=conf["authorisation"]["fetch_aliases"].keys(),
            help="The API key alias you want to use to fetch the measurement. "
            "To configure an API key alias, use "
            "`ripe-atlas configure --set "
            "authorisation.fetch_aliases.ALIAS_NAME=YOUR_KEY`. "
            "(Can also be passed using the ATLAS_FETCH_KEY environment variable)",
        )
        self.parser.add_argument(
            "--probes",
            type=ArgumentType.comma_separated_integers_or_file,
            help="Either a comma-separated list of probe ids you want to see "
            "exclusively, a path to a file containing probe ids (one on "
            'each line), or "-" for standard input in the same format.',
        )
        self.parser.add_argument(
            "--renderer",
            choices=Renderer.get_available(),
            help="The renderer you want to use. If this isn't defined, an "
            "appropriate renderer will be selected.",
        )
        self.parser.add_argument(
            "--aggregate-by",
            type=str,
            choices=self.AGGREGATORS.keys(),
            action="append",
            help="Tell the rendering engine to aggregate the results by the "
            "selected option. Note that if you opt for aggregation, no "
            "output will be generated until all results are received.",
        )
        self.parser.add_argument(
            "--probe-asns",
            type=ArgumentType.comma_separated_integers(
                minimum=1,
                # http://www.iana.org/assignments/as-numbers/as-numbers.xhtml
                maximum=2 ** 32 - 2,
            ),
            help="A comma-separated list of probe ASNs you want to see "
            "exclusively.",
        )
        self.parser.add_argument(
            "--start-time",
            type=ArgumentType.datetime,
            help="The start time of the report.",
        )
        self.parser.add_argument(
            "--stop-time",
            type=ArgumentType.datetime,
            help="The stop time of the report.",
        )
        self.parser.add_argument(
            "--from-file",
            type=ArgumentType.path,
            help="The source of the data to be rendered. "
            "(Conflicts with specifying measurement_id)",
        )

        Renderer.add_arguments_for_available_renderers(self.parser)

    def _get_request_auth(self):
        """API key resolution: environment beats --auth alias beats config."""
        if os.getenv("ATLAS_FETCH_KEY"):
            return os.getenv("ATLAS_FETCH_KEY")
        if self.arguments.auth:
            return conf["authorisation"]["fetch_aliases"][self.arguments.auth]
        else:
            return conf["authorisation"]["fetch"]

    def _get_request(self):
        """Build the cousteau request object for the selected time window."""
        kwargs = {
            "server": conf["api-server"],
            "msm_id": self.arguments.measurement_id,
            "user_agent": self.user_agent,
        }
        kwargs["key"] = self._get_request_auth()
        if self.arguments.probes:
            kwargs["probe_ids"] = self.arguments.probes
        if self.arguments.start_time:
            kwargs["start"] = self.arguments.start_time
        if self.arguments.stop_time:
            kwargs["stop"] = self.arguments.stop_time

        # A time window means the full results API; otherwise latest-only.
        if "start" in kwargs or "stop" in kwargs:
            return AtlasResultsRequest(**kwargs)
        return AtlasLatestRequest(**kwargs)

    def run(self):
        if self.arguments.measurement_id and self.arguments.from_file:
            raise RipeAtlasToolsException(
                "You can only specify one of --from-file or "
                "measurement_id, not both."
            )
        if self.arguments.measurement_id:
            results, sample = self._get_results_from_api(self.arguments.measurement_id)
            use_regular_file = False
        else:
            if self.arguments.from_file:
                use_regular_file = self.arguments.from_file != "-"
            elif sys.stdin.isatty():
                self.parser.print_help()
                sys.exit(1)
            else:
                use_regular_file = False
            results, sample = self._get_results_from_file(use_regular_file)

        # Sagan calls measurements "ssl" when they are actually "sslcert"
        # so we use .raw_data once we have verified and parsed the sample.
        measurement_type = Result.get(sample).raw_data["type"].lower()

        renderer = Renderer.get_renderer(self.arguments.renderer, measurement_type)(
            arguments=self.arguments
        )

        results = SaganSet(
            iterable=results,
            probes=self.arguments.probes,
        )
        if self.arguments.probe_asns:
            asn_filters = set([])
            for asn in self.arguments.probe_asns:
                asn_filters.add(FilterFactory.create("asn", asn))
            results = filter_results(asn_filters, list(results))

        if self.arguments.aggregate_by:
            results = aggregate(results, self.get_aggregators())

        renderer.render(results)

        if use_regular_file:
            self.file.close()

    def _get_results_from_api(self, measurement_id):
        """
        Fetch results from the API, returning (results, first_result).

        Raises RipeAtlasToolsException when the response is empty or is an
        error document instead of a result list.
        """
        results = self._get_request().get()[1]
        if isinstance(results, list):
            if not results:
                raise RipeAtlasToolsException(
                    "There aren't any results for your request."
                )
        else:
            # A dict here means the API returned an error document.
            # Bug fix: the old code's `else` branch replaced the message
            # with '"None" Error fetching measurement results' when no
            # "error" key was present.
            error = results.get("error")
            msg = "Error fetching measurement results"
            if error:
                msg += ": [{status} {title}] {detail}".format(**error)
            raise RipeAtlasToolsException(msg)

        sample = results[0]

        return results, sample

    def _get_results_from_file(self, using_regular_file):
        """
        We need to get the first result from the source in order to detect
        the type. Additionally, if the source is actually one great big JSON
        list, then we need to parse it so we iterate over the results since
        there's no newline characters.
        """
        self.file = sys.stdin
        if using_regular_file:
            self.file = open(self.arguments.from_file)

        # Pop the first line off the source stack. This may very well be a
        # Very Large String and cause a memory explosion, but we like to let
        # our users shoot themselves in the foot.
        sample = next(self.file)

        # Re-attach the line back onto the iterable so we don't lose anything
        results = itertools.chain([sample], self.file)

        # In the case of the Very Large String, we parse out the JSON here
        if sample.startswith("["):
            results = json.loads("".join(results))
            sample = results[0]  # Reassign sample to an actual result

        return results, sample

    def get_aggregators(self):
        """Return aggregators list based on user input"""
        aggregation_keys = []
        for aggr_key in self.arguments.aggregate_by:
            # Get class and aggregator key
            aggregation_class = self.AGGREGATORS[aggr_key][1]
            key = self.AGGREGATORS[aggr_key][0]
            if aggr_key == "rtt-median":
                # Get range for the aggregation
                key_range = self.AGGREGATORS[aggr_key][2]
                aggregation_keys.append(aggregation_class(key=key, ranges=key_range))
            else:
                aggregation_keys.append(aggregation_class(key=key))
        return aggregation_keys
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/report.py
0.498535
0.172102
report.py
pypi
from ...helpers.validators import ArgumentType from ...settings import conf from .base import Command class HttpMeasureCommand(Command): DESCRIPTION = "Create an HTTP measurement and wait for the results" def add_arguments(self): Command.add_arguments(self) self.add_primary_argument(name="target", parser=self.parser) spec = conf["specification"]["types"]["http"] specific = self.parser.add_argument_group("HTTP-specific Options") specific.add_argument( "--header-bytes", type=ArgumentType.integer_range(minimum=0, maximum=2048), default=spec["header-bytes"], help="The maximum number of bytes to retrieve from the header", ) specific.add_argument( "--version", type=str, default=spec["version"], help="The HTTP version to use", ) specific.add_argument( "--method", type=str, default=spec["method"], help="The HTTP method to use" ) specific.add_argument( "--port", type=ArgumentType.integer_range(minimum=1, maximum=65535), default=spec["port"], help="Destination port", ) specific.add_argument("--path", type=str, default=spec["path"], help="") specific.add_argument( "--query-string", type=str, default=spec["query-string"], help="" ) specific.add_argument( "--user-agent", type=str, default=spec["user-agent"], help="The user agent used when performing the request", ) specific.add_argument( "--body-bytes", type=ArgumentType.integer_range(minimum=1, maximum=1020048), default=spec["body-bytes"], help="The maximum number of bytes to retrieve from the body", ) specific.add_argument( "--timing-verbosity", type=int, choices=(0, 1, 2), default=spec["timing-verbosity"], help="The amount of timing information you want returned. 1 " "returns the time to read, to connect, and to first byte, 2 " "returns timing information per read system call. 
0 " "(default) returns no additional timing information.", ) def _get_measurement_kwargs(self): r = Command._get_measurement_kwargs(self) keys = ( "header_bytes", "version", "method", "port", "path", "query_string", "user_agent", ) for key in keys: r[key] = getattr(self.arguments, key) if self.arguments.timing_verbosity > 0: r["extended_timing"] = True if self.arguments.timing_verbosity > 1: r["more_extended_timing"] = True r["max_bytes_read"] = self.arguments.body_bytes return r
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/measure/http.py
0.857112
0.165897
http.py
pypi
import json import os import re import webbrowser from collections import OrderedDict from ripe.atlas.cousteau import ( Ping, Traceroute, Dns, Sslcert, Http, Ntp, AtlasSource, AtlasCreateRequest, ) from ripe.atlas.cousteau.measurement import AtlasMeasurement from ...exceptions import RipeAtlasToolsException from ...helpers.colours import colourise from ...helpers.validators import ArgumentType from ...renderers import Renderer from ...settings import conf, aliases, AliasesDB from ...streaming import Stream from ..base import Command as BaseCommand class Command(BaseCommand): NAME = "measure" DESCRIPTION = "Create a measurement and optionally wait for the results" CREATION_CLASSES = OrderedDict( ( ("ping", Ping), ("traceroute", Traceroute), ("dns", Dns), ("sslcert", Sslcert), ("http", Http), ("ntp", Ntp), ("spec", AtlasMeasurement), ) ) def __init__(self, *args, **kwargs): self._type = None self._is_oneoff = True BaseCommand.__init__(self, *args, **kwargs) def _modify_parser_args(self, args): kinds = self.CREATION_CLASSES.keys() error = ( "Usage: ripe-atlas measure <{}> [options]\n" "\n" " Example: ripe-atlas measure ping --target example.com" "".format("|".join(kinds)) ) if not args: raise RipeAtlasToolsException(error) if args[0] not in self.CREATION_CLASSES.keys(): raise RipeAtlasToolsException(error) self._type = args.pop(0) if not args: args.append("--help") return BaseCommand._modify_parser_args(self, args) def add_arguments(self): self.parser.add_argument( "--renderer", choices=Renderer.get_available(), help="The renderer you want to use. If this isn't defined, an " "appropriate renderer will be selected.", ) self.parser.add_argument( "--dry-run", action="store_true", help="Do not create the measurement, only show its definition.", ) # Standard for all types self.parser.add_argument( "--auth", type=str, default=os.getenv("ATLAS_CREATE_KEY", conf["authorisation"]["create"]), help="The API key you want to use to create the measurement. 
" "(Can be defined with `ripe-atlas configure --set authorisation.create` " "or the ATLAS_CREATE_KEY environment variable)", ) self.parser.add_argument( "--af", type=int, choices=(4, 6), help="The address family, either 4 or 6", ) self.parser.add_argument( "--description", type=str, default=conf["specification"]["description"], help="A free-form description", ) self.parser.add_argument( # Most types "--target", type=ArgumentType.ip_or_domain, help="The target, either a domain name or IP address. If creating " "a DNS measurement, the absence of this option will imply " "that you wish to use the probe's resolver.", ) self.parser.add_argument( "--no-report", action="store_true", help="Don't wait for a response from the measurement, just return " "the URL at which you can later get information about the " "measurement.", ) self.parser.add_argument( "--go-web", action="store_true", help="Open the measurement in a webbrowser immediately.", ) self.parser.add_argument( "--set-alias", help="After creating the measurement, register an alias for it.", type=ArgumentType.alias_is_valid, metavar="ALIAS", ) self.parser.add_argument( "--interval", type=int, help="Rather than run this measurement as a one-off (the default), " "create this measurement as a recurring one, with an interval " "of n seconds between attempted measurements. This option " "implies --no-report.", ) origins = self.parser.add_mutually_exclusive_group() origins.add_argument( "--from-area", type=str, choices=( "WW", "West", "North-Central", "South-Central", "North-East", "South-East", ), help="The area from which you'd like to select your probes.", ) origins.add_argument( "--from-country", type=ArgumentType.country_code, metavar="COUNTRY", help="The two-letter ISO code for the country from which you'd " "like to select your probes. Example: --from-country=GR", ) origins.add_argument( "--from-prefix", type=str, metavar="PREFIX", help="The prefix from which you'd like to select your probes. 
" "Example: --from-prefix=82.92.0.0/14", ) origins.add_argument( "--from-asn", # http://www.iana.org/assignments/as-numbers/as-numbers.xhtml type=ArgumentType.integer_range(1, 2**32 - 2), metavar="ASN", help="The ASN from which you'd like to select your probes. " "Example: --from-asn=3333", ) origins.add_argument( "--from-probes", type=ArgumentType.comma_separated_integers(minimum=1), metavar="PROBES", help="A comma-separated list of probe-ids you want to use in your " "measurement. Example: --from-probes=1,2,34,157,10006", ) origins.add_argument( "--from-measurement", type=ArgumentType.integer_range(minimum=1), metavar="MEASUREMENT_ID", help="A measurement id which you want to use as the basis for " "probe selection in your new measurement. This is a handy " "way to re-create a measurement under conditions similar to " "another measurement. Example: --from-measurement=1000192", ) self.parser.add_argument( "--probes", type=ArgumentType.integer_range(minimum=1), default=None, help="The number of probes you want to use. Defaults to {}," "unless --from-probes is invoked, in which case the number of " "probes selected is used.".format( conf["specification"]["source"]["requested"] ), ) self.parser.add_argument( "--include-tag", type=ArgumentType.tag, action="append", metavar="TAG", help="Include only probes that are marked with these tags. " "Example: --include-tag=system-ipv6-works", ) self.parser.add_argument( "--exclude-tag", type=ArgumentType.tag, action="append", metavar="TAG", help="Exclude probes that are marked with these tags. 
" "Example: --exclude-tag=system-ipv6-works", ) self.parser.add_argument( "--group-id", type=int, help="Add newly created measurement to a group that you own", ) # Validation is too complex because it's based on the interval, so # we just accept the round-trip for server-side validation self.parser.add_argument( "--spread", type=int, default=conf["specification"]["spread"], help="Specify the spread of probes within a single measurement interval", ) self.add_flag( parser=self.parser, name="resolve-on-probe", default=conf["specification"]["resolve_on_probe"], help="Resolve the target on each probe instead of once by the server", no_help="Resolve the target once on the server instead of on each probe", ) self.parser.add_argument( "--measurement-tags", help="Comma-separated list of tags to apply to the new measurement", ) self.parser.add_argument( "--stream-limit", type=int, help="The maximum number of results you want to stream, " "defaults to number of requested probes", ) self.parser.add_argument( "--stream-timeout", type=float, default=300, help="Stop streaming new measurements after this number of seconds", ) Renderer.add_arguments_for_available_renderers(self.parser) def run(self): self._account_for_selected_probes() if self.arguments.dry_run: return self.dry_run() is_success, response = self.create() if not is_success: self._handle_api_error(response) # Raises an exception pk = response["measurements"][0] url = "{0}/measurements/{1}/".format(conf["website-url"], pk) self.ok( f"Looking good! Measurement {pk} was created and details about " f"it can be found here:\n\n {url}" ) if self.arguments.go_web: self.ok("Opening the url in the browser\n\n ") if not webbrowser.open(url): self.ok( "It looks like your system doesn't have a web browser " "available. 
You'll have to go there manually: {0}".format(url) ) if self.arguments.set_alias: alias = self.arguments.set_alias aliases["measurement"][alias] = pk AliasesDB.write(aliases) if not self.arguments.no_report: self.stream(pk, url) def dry_run(self): print(colourise("\nDefinitions:\n{}".format("=" * 80), "bold")) for param, val in self._get_measurement_kwargs().items(): print(colourise("{:<25} {}".format(param, val), "cyan")) print(colourise("\nSources:\n{}".format("=" * 80), "bold")) for param, val in self._get_source_kwargs().items(): if param == "tags": print( colourise( "tags\n include{}{}\n exclude{}{}\n".format( " " * 17, ", ".join(val["include"]), " " * 17, ", ".join(val["exclude"]), ), "cyan", ) ) continue print(colourise("{:<25} {}".format(param, val), "cyan")) def create(self): creation_class = self.CREATION_CLASSES[self._type] return AtlasCreateRequest( server=conf["api-server"], key=self.arguments.auth, user_agent=self.user_agent, measurements=[creation_class(**self._get_measurement_kwargs())], sources=[AtlasSource(**self._get_source_kwargs())], is_oneoff=self._is_oneoff, ).create() def stream(self, pk, url): self.ok("Connecting to stream...") capture_limit = self.arguments.stream_limit or self.arguments.probes stream = Stream( pk, capture_limit=capture_limit, timeout=self.arguments.stream_timeout ) renderer = Renderer.get_renderer(name=self.arguments.renderer, kind=self._type)( arguments=self.arguments ) renderer.render(stream) self.ok("Disconnected from stream") def clean_target(self): if not self.arguments.target: raise RipeAtlasToolsException( "You must specify a target for that kind of measurement" ) return self.arguments.target def clean_description(self): if self.arguments.description: return self.arguments.description if conf["specification"]["description"]: return conf["specification"]["description"] return "{} measurement to {}".format( self._type.capitalize(), self.arguments.target ) def _get_measurement_kwargs(self): # This is kept apart from 
the r = {} because dns measurements don't # require a target attribute target = self.clean_target() r = { "af": self._get_af(), "description": self.clean_description(), } spec = conf["specification"] # Shorter names are easier to read if self.arguments.interval or spec["times"]["interval"]: r["interval"] = self.arguments.interval self._is_oneoff = False self.arguments.no_report = True elif not spec["times"]["one-off"]: raise RipeAtlasToolsException( "Your configuration file appears to be setup to not create " "one-offs, but also offers no interval value. Without one of " "these, a measurement cannot be created." ) if self.arguments.measurement_tags: tags = self.arguments.measurement_tags.split(",") r["tags"] = tags if target: r["target"] = target if self.arguments.group_id: r["group_id"] = self.arguments.group_id if self.arguments.spread is not None: r["spread"] = self.arguments.spread if self.arguments.resolve_on_probe is not None: r["resolve_on_probe"] = self.arguments.resolve_on_probe return r def _get_source_kwargs(self): r = conf["specification"]["source"] r["requested"] = self.arguments.probes if self.arguments.from_country: r["type"] = "country" r["value"] = self.arguments.from_country elif self.arguments.from_area: r["type"] = "area" r["value"] = self.arguments.from_area elif self.arguments.from_prefix: r["type"] = "prefix" r["value"] = self.arguments.from_prefix elif self.arguments.from_asn: r["type"] = "asn" r["value"] = self.arguments.from_asn elif self.arguments.from_probes: r["type"] = "probes" r["value"] = ",".join([str(_) for _ in self.arguments.from_probes]) elif self.arguments.from_measurement: r["type"] = "msm" r["value"] = self.arguments.from_measurement r["tags"] = { "include": self.arguments.include_tag or [], "exclude": self.arguments.exclude_tag or [], } af = "ipv{}".format(self._get_af()) kind = self._type spec = conf["specification"] for clude in ("in", "ex"): clude += "clude" if not r["tags"][clude]: r["tags"][clude] += 
spec["tags"][af][kind][clude] r["tags"][clude] += spec["tags"][af]["all"][clude] return r def _get_af(self): """ Returns the specified af, or a guessed one, or the configured one. In that order. """ if self.arguments.af: return self.arguments.af if self.arguments.target: if ":" in self.arguments.target: return 6 if re.match(r"^\d+\.\d+\.\d+\.\d+$", self.arguments.target): return 4 return conf["specification"]["af"] def _account_for_selected_probes(self): """ If the user has used --from-probes, there's a little extra magic we need to do. """ # We can't use argparse's mutually_exclusive_group() method here because # that library doesn't allow partial overlap. if self.arguments.from_probes and self.arguments.probes: raise RipeAtlasToolsException( "Explicit probe selection (--from-probes) in incompatible with " "a --probes argument." ) configured = conf["specification"]["source"]["requested"] if not self.arguments.probes: self.arguments.probes = configured if self.arguments.from_probes: self.arguments.probes = len(self.arguments.from_probes) @staticmethod def _handle_api_error(response): message = "There was a problem communicating with the RIPE Atlas API." if isinstance(response, dict): if response.get("error", {}).get("status") == 403: message += ( "\n\nThe status is 403 so you probably need an API key.\n\n" "Go to https://atlas.ripe.net/keys/ and create a key with the " "permission 'Create a new user defined measurement' and install " "using:\n\n" " ripe-atlas configure --set authorisation.create=MY_API_KEY\n" ) message += f"\n\n{json.dumps(response, indent=2)}" raise RipeAtlasToolsException(message)
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/measure/base.py
0.663669
0.158435
base.py
pypi
from ripe.atlas.sagan.dns import Message from ...exceptions import RipeAtlasToolsException from ...helpers.validators import ArgumentType from ...settings import conf from .base import Command class DnsMeasureCommand(Command): DESCRIPTION = "Create a DNS measurement and wait for the results" def _upper_str(self, s): """ Private method to validate specific command line arguments that should be provided in upper or lower case :param s: string :return: string in upper case """ return s.upper() def add_arguments(self): Command.add_arguments(self) self.add_primary_argument(name="query_argument", parser=self.parser) specific = self.parser.add_argument_group("DNS-specific Options") specific.add_argument( "--protocol", type=self._upper_str, choices=("UDP", "TCP"), default=conf["specification"]["types"]["dns"]["protocol"], help="The protocol used.", ) specific.add_argument( "--query-class", type=self._upper_str, choices=("IN", "CHAOS"), default=conf["specification"]["types"]["dns"]["query-class"], help='The query class. The default is "{}"'.format( conf["specification"]["types"]["dns"]["query-class"] ), ) specific.add_argument( "--query-type", type=self._upper_str, choices=list(Message.ANSWER_CLASSES.keys()) + ["ANY"], # The only ones we can parse default=conf["specification"]["types"]["dns"]["query-type"], help='The query type. 
The default is "{}"'.format( conf["specification"]["types"]["dns"]["query-type"] ), ) specific.add_argument( "--query-argument", type=str, default=conf["specification"]["types"]["dns"]["query-argument"], help="The DNS label to query", ) self.add_flag( parser=specific, name="set-cd-bit", help="Set DNSSEC Checking Disabled flag (RFC4035)", default=conf["specification"]["types"]["dns"]["set-cd-bit"], ) self.add_flag( parser=specific, name="set-do-bit", help="Set DNSSEC OK flag (RFC3225)", default=conf["specification"]["types"]["dns"]["set-do-bit"], ) self.add_flag( parser=specific, name="set-nsid-bit", help="Set Name Server Identifier flag (RFC5001)", default=conf["specification"]["types"]["dns"]["set-nsid-bit"], ) self.add_flag( parser=specific, name="set-rd-bit", help="Set Recursion Desired flag (RFC1035)", default=conf["specification"]["types"]["dns"]["set-rd-bit"], ) specific.add_argument( "--retry", type=ArgumentType.integer_range(minimum=0, maximum=10), default=conf["specification"]["types"]["dns"]["retry"], help="Number of times to retry", ) specific.add_argument( "--udp-payload-size", type=ArgumentType.integer_range(minimum=512, maximum=4096), default=conf["specification"]["types"]["dns"]["udp-payload-size"], help="May be any integer between 512 and 4096 inclusive", ) specific.add_argument( "--timeout", default=conf["specification"]["types"]["dns"]["timeout"], type=ArgumentType.integer_range(minimum=100, maximum=30000), help="Per packet timeout in milliseconds", ) self.add_flag( parser=specific, name="tls", help="Send query using DNS-over-TLS", default=conf["specification"]["types"]["dns"]["tls"], ) def clean_target(self): """ Targets aren't required for this type """ return self.arguments.target def clean_description(self): if self.arguments.target: return Command.clean_description(self) return "DNS measurement for {}".format(self.arguments.query_argument) def _get_measurement_kwargs(self): r = Command._get_measurement_kwargs(self) for opt in ("class", 
"type", "argument"): if not getattr(self.arguments, "query_{0}".format(opt)): raise RipeAtlasToolsException( "At a minimum, DNS measurements require a query argument." ) r["query_class"] = self.arguments.query_class r["query_type"] = self.arguments.query_type r["query_argument"] = self.arguments.query_argument r["set_cd_bit"] = self.arguments.set_cd_bit r["set_do_bit"] = self.arguments.set_do_bit r["set_rd_bit"] = self.arguments.set_rd_bit r["set_nsid_bit"] = self.arguments.set_nsid_bit r["protocol"] = self.arguments.protocol r["retry"] = self.arguments.retry r["udp_payload_size"] = self.arguments.udp_payload_size r["use_probe_resolver"] = "target" not in r r["tls"] = self.arguments.tls if self.arguments.timeout is not None: r["timeout"] = self.arguments.timeout return r
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/measure/dns.py
0.801431
0.156523
dns.py
pypi
from ...helpers.validators import ArgumentType from ...settings import conf from .base import Command class TracerouteMeasureCommand(Command): DESCRIPTION = "Create a traceroute measurement and wait for the results" def _upper_str(self, s): """ Private method to validate specific command line arguments that should be provided in upper or lower case :param s: string :return: string in upper case """ return s.upper() def add_arguments(self): Command.add_arguments(self) self.add_primary_argument(name="target", parser=self.parser) spec = conf["specification"]["types"]["traceroute"] specific = self.parser.add_argument_group("Traceroute-specific Options") specific.add_argument( "--packets", type=ArgumentType.integer_range(minimum=1, maximum=16), default=spec["packets"], help="The number of packets sent", ) specific.add_argument( "--size", type=ArgumentType.integer_range(minimum=0, maximum=2048), default=spec["size"], help="The size of packets sent", ) specific.add_argument( "--protocol", type=self._upper_str, choices=("ICMP", "UDP", "TCP"), default=spec["protocol"], help="The protocol used.", ) specific.add_argument( "--timeout", type=ArgumentType.integer_range(minimum=1), default=spec["timeout"], help="The timeout per-packet", ) self.add_flag( parser=specific, name="dont-fragment", default=spec["dont-fragment"], help="Disable fragmentation of outgoing packets", ) specific.add_argument( "--paris", type=ArgumentType.integer_range(minimum=0, maximum=64), default=spec["paris"], help="Use Paris. Value must be between 0 and 64." 
"If 0, a standard traceroute will be performed", ) specific.add_argument( "--first-hop", type=ArgumentType.integer_range(minimum=1, maximum=255), default=spec["first-hop"], help="Value must be between 1 and 255", ) specific.add_argument( "--max-hops", type=ArgumentType.integer_range(minimum=1, maximum=255), default=spec["max-hops"], help="Value must be between 1 and 255", ) specific.add_argument( "--port", type=ArgumentType.integer_range(minimum=1, maximum=65535), default=spec["port"], help="Destination port, valid for TCP only", ) specific.add_argument( "--destination-option-size", type=ArgumentType.integer_range(minimum=0, maximum=1024), default=spec["destination-option-size"], help="IPv6 destination option header", ) specific.add_argument( "--hop-by-hop-option-size", type=ArgumentType.integer_range(minimum=0, maximum=2048), default=spec["hop-by-hop-option-size"], help=" IPv6 hop by hop option header", ) specific.add_argument( "--duplicate-timeout", default=spec["duplicate-timeout"], type=int, help="Time to wait (in milliseconds) for a duplicate response " "after receiving the first response", ) specific.add_argument( "--response-timeout", default=spec["response-timeout"], type=ArgumentType.integer_range(minimum=1, maximum=60000), help="Response timeout for one packet", ) def _get_measurement_kwargs(self): r = Command._get_measurement_kwargs(self) keys = ( "destination_option_size", "dont_fragment", "first_hop", "hop_by_hop_option_size", "max_hops", "packets", "paris", "port", "protocol", "size", "timeout", ) for key in keys: r[key] = getattr(self.arguments, key) optional_keys = ["duplicate_timeout", "response_timeout"] for key in optional_keys: val = getattr(self.arguments, key) if val is not None: r[key] = val return r
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/measure/traceroute.py
0.864939
0.219923
traceroute.py
pypi
from ...exceptions import RipeAtlasToolsException from ..base import Factory as BaseFactory from .ping import PingMeasureCommand from .traceroute import TracerouteMeasureCommand from .dns import DnsMeasureCommand from .sslcert import SslcertMeasureCommand from .http import HttpMeasureCommand from .ntp import NtpMeasureCommand from .spec import SpecMeasureCommand class Factory(BaseFactory): TYPES = { "ping": PingMeasureCommand, "traceroute": TracerouteMeasureCommand, "dns": DnsMeasureCommand, "sslcert": SslcertMeasureCommand, "http": HttpMeasureCommand, "ntp": NtpMeasureCommand, "spec": SpecMeasureCommand, } DESCRIPTION = "Create a measurement and wait for the results" def __init__(self, sys_args): self.build_class = None self.sys_args = sys_args if len(self.sys_args) >= 2: self.build_class = self.TYPES.get(self.sys_args[1].lower()) if not self.build_class: self.raise_log() def raise_log(self): """Depending on the input raise with different log message.""" # cases: 1) ripe-atlas measure 2) ripe-atlas measure --help/-h if len(self.sys_args) == 1 or ( len(self.sys_args) == 2 and self.sys_args[1] in ("--help", "-h") ): log = "Usage: ripe-atlas measure <type> [arguments]\n\n" "Types:\n" for type_name, type_ in sorted(self.TYPES.items()): log += f"\t{type_name:<12} {type_.DESCRIPTION}\n" log += ( "\nFor extended options for a specific measurement type, " "try ripe-atlas measure <type> --help." ) # cases: ripe-atlas measure bla else: log = ( "The measurement type you requested is invalid. " "Please choose one of {}." ).format(", ".join(self.TYPES.keys())) raise RipeAtlasToolsException(log) def create(self, *args, **kwargs): return self.build_class(*args, **kwargs)
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/commands/measure/__init__.py
0.505615
0.182608
__init__.py
pypi
import itertools class ValueKeyAggregator(object): """Aggregator based on tha actual value of the key/attribute""" def __init__(self, key, prefix=None): self.aggregation_keys = key.split(".") self.key_prefix = prefix or self.aggregation_keys[-1].upper() def get_key_value(self, entity): """ Returns the value of the key/attribute the aggregation will use to bucketize probes/results """ attribute = entity for key in self.aggregation_keys: attribute = getattr(attribute, key) return attribute def get_bucket(self, entity): """ Returns the bucket the specific entity belongs to based on the give key/attribute """ return "{0}: {1}".format(self.key_prefix, self.get_key_value(entity)) class RangeKeyAggregator(ValueKeyAggregator): """ Aggregator based on where the position of the value of the key/attribute is in the given range """ def __init__(self, key, ranges): ValueKeyAggregator.__init__(self, key) self.aggregation_ranges = sorted(ranges, reverse=True) def get_bucket(self, entity): """ Returns the bucket the specific entity belongs to based on the give key/attribute """ bucket = "{0}: < {1}".format( self.key_prefix, self.aggregation_ranges[-1] ) key_value = self.get_key_value(entity) for index, krange in enumerate(self.aggregation_ranges): if key_value > krange: if index == 0: bucket = "{0}: > {1}".format(self.key_prefix, krange) else: bucket = "{0}: {1}-{2}".format( self.key_prefix, krange, self.aggregation_ranges[index - 1], ) break return bucket def _get_sort_key(kv): key = [] for is_digit, part in itertools.groupby(kv[0], key=str.isdigit): part = "".join(part) if is_digit: part = int(part) key.append(part) return key def aggregate(entities, aggregators): """ Aggregate the given entities using the given aggregators. Returns a dict of {combined_aggregation_key_tuple: entity_list}, where the keys are in ascending numeric >> lexical order. 
""" if not aggregators: return entities buckets = {} for e in entities: key = " | ".join(a.get_bucket(e) for a in aggregators) bucket = buckets.setdefault(key, []) bucket.append(e) return dict(sorted(buckets.items(), key=_get_sort_key))
/ripe.atlas.tools-3.1.0-py3-none-any.whl/ripe/atlas/tools/aggregators/base.py
0.799168
0.252897
base.py
pypi
from textwrap import wrap from typing import Any, Dict, List, Optional, Tuple, Union from colorama import Back, Fore, Style from tabulate import tabulate from .formatters import get_formatter TableData = List[Dict[str, Any]] TableStructure = Tuple[ Tuple[ Union[str, Tuple[str, ...]], Optional[str], Optional[str], Optional[int], ], ..., ] def colourise( data: str, foreground: Optional[str] = None, background: Optional[str] = None, style: Optional[str] = None, ): prefix = "{}{}{}".format( getattr(Fore, foreground) if foreground else "", getattr(Back, background) if background else "", getattr(Style, style) if style else "", ) return f"{prefix}{data}{Style.RESET_ALL}" class TableFilter: def __init__(self, data: TableData, structure: TableStructure): self.data = data self.structure = structure def render(self) -> str: rows = [] for d in self.data: row = [] for column in self.structure: row.append( self._get_row( d, *(column[:1] + column[2:]), # type: ignore ) ) rows.append(row) return "{}{}{}".format( Style.DIM, tabulate( rows, headers=[_[1] for _ in self.structure], tablefmt="fancy_grid", ), Style.RESET_ALL, ) @classmethod def _get_row( cls, data: Dict[str, Any], key: Union[str, Tuple[str]], formatter_name: str = "str", width: int = 0, ): arg_names = key if isinstance(key, str): arg_names = (key,) formatter = get_formatter(formatter_name) return formatter(*[cls._wrap(data[name], width) for name in arg_names]) @staticmethod def _wrap(text: str, width: int): if width: return f"{Style.DIM}\n{Style.NORMAL}".join(wrap(text, width)) return text def as_table(data: TableData, structure: TableStructure) -> str: return TableFilter(data=data, structure=structure).render()
/ripe.stat.cli-0.1.0-py3-none-any.whl/ripe/stat/cli/filters.py
0.84869
0.277657
filters.py
pypi
import os import abc import sys from importlib import machinery extension_suffixes = machinery.EXTENSION_SUFFIXES # Which types with buffer interface we support (apart from byte strings) _buffer_type = (bytearray, memoryview) def ripemd_filename(dir_comps, filename): """Return the complete file name for the module dir_comps : list of string The list of directory names in the ripemd package. The first element must be "ripemd". filename : string The filename (inclusing extension) in the target directory. """ if dir_comps[0] != "ripemd": raise ValueError("Only available for modules under 'ripemnd'") dir_comps = list(dir_comps[1:]) + [filename] root_lib, _ = os.path.split(os.path.abspath(__file__)) return os.path.join(root_lib, *dir_comps) class _VoidPointer(object): @abc.abstractmethod def get(self): """Return the memory location we point to""" return @abc.abstractmethod def address_of(self): """Return a raw pointer to this pointer""" return try: # Starting from v2.18, pycparser (used by cffi for in-line ABI mode) # stops working correctly when PYOPTIMIZE==2 or the parameter -OO is # passed. In that case, we fall back to ctypes. # Note that PyPy ships with an old version of pycparser so we can keep # using cffi there. # See https://github.com/Legrandin/pycryptodome/issues/228 if '__pypy__' not in sys.builtin_module_names and sys.flags.optimize == 2: raise ImportError("CFFI with optimize=2 fails due to pycparser bug.") from cffi import FFI ffi = FFI() null_pointer = ffi.NULL uint8_t_type = ffi.typeof(ffi.new("const uint8_t*")) _Array = ffi.new("uint8_t[1]").__class__.__bases__ def load_lib(name, cdecl): """Load a shared library and return a handle to it. @name, either an absolute path or the name of a library in the system search path. @cdecl, the C function declarations. 
""" if hasattr(ffi, "RTLD_DEEPBIND") and not os.getenv('PYCRYPTODOME_DISABLE_DEEPBIND'): lib = ffi.dlopen(name, ffi.RTLD_DEEPBIND) else: lib = ffi.dlopen(name) ffi.cdef(cdecl) return lib def c_ulong(x): """Convert a Python integer to unsigned long""" return x c_ulonglong = c_ulong c_uint = c_ulong c_ubyte = c_ulong def c_size_t(x): """Convert a Python integer to size_t""" return x def create_string_buffer(init_or_size, size=None): """Allocate the given amount of bytes (initially set to 0)""" if isinstance(init_or_size, bytes): size = max(len(init_or_size) + 1, size) result = ffi.new("uint8_t[]", size) result[:] = init_or_size else: if size: raise ValueError("Size must be specified once only") result = ffi.new("uint8_t[]", init_or_size) return result def get_c_string(c_string): """Convert a C string into a Python byte sequence""" return ffi.string(c_string) def get_raw_buffer(buf): """Convert a C buffer into a Python byte sequence""" return ffi.buffer(buf)[:] def c_uint8_ptr(data): if isinstance(data, _buffer_type): # This only works for cffi >= 1.7 return ffi.cast(uint8_t_type, ffi.from_buffer(data)) elif isinstance(data, bytes) or isinstance(data, _Array): return data else: raise TypeError("Object type %s cannot be passed to C code" % type(data)) class VoidPointer_cffi(_VoidPointer): """Model a newly allocated pointer to void""" def __init__(self): self._pp = ffi.new("void *[1]") def get(self): return self._pp[0] def address_of(self): return self._pp def VoidPointer(): return VoidPointer_cffi() backend = "cffi" except ImportError: import ctypes from ctypes import (CDLL, c_void_p, byref, c_ulong, c_ulonglong, c_size_t, create_string_buffer, c_ubyte, c_uint) from ctypes.util import find_library from ctypes import Array as _Array null_pointer = None cached_architecture = [] def c_ubyte(c): if not (0 <= c < 256): raise OverflowError() return ctypes.c_ubyte(c) def load_lib(name, cdecl): if not cached_architecture: # platform.architecture() creates a subprocess, so 
caching the # result makes successive imports faster. import platform cached_architecture[:] = platform.architecture() bits, linkage = cached_architecture if "." not in name and not linkage.startswith("Win"): full_name = find_library(name) if full_name is None: raise OSError("Cannot load library '%s'" % name) name = full_name return CDLL(name) def get_c_string(c_string): return c_string.value def get_raw_buffer(buf): return buf.raw # ---- Get raw pointer --- _c_ssize_t = ctypes.c_ssize_t _PyBUF_SIMPLE = 0 _PyObject_GetBuffer = ctypes.pythonapi.PyObject_GetBuffer _PyBuffer_Release = ctypes.pythonapi.PyBuffer_Release _py_object = ctypes.py_object _c_ssize_p = ctypes.POINTER(_c_ssize_t) # See Include/object.h for CPython # and https://github.com/pallets/click/blob/master/src/click/_winconsole.py class _Py_buffer(ctypes.Structure): _fields_ = [ ('buf', c_void_p), ('obj', ctypes.py_object), ('len', _c_ssize_t), ('itemsize', _c_ssize_t), ('readonly', ctypes.c_int), ('ndim', ctypes.c_int), ('format', ctypes.c_char_p), ('shape', _c_ssize_p), ('strides', _c_ssize_p), ('suboffsets', _c_ssize_p), ('internal', c_void_p) ] # Extra field for CPython 2.6/2.7 if sys.version_info[0] == 2: _fields_.insert(-1, ('smalltable', _c_ssize_t * 2)) def c_uint8_ptr(data): if isinstance(data, bytes) or isinstance(data, _Array): return data elif isinstance(data, _buffer_type): obj = _py_object(data) buf = _Py_buffer() _PyObject_GetBuffer(obj, byref(buf), _PyBUF_SIMPLE) try: buffer_type = ctypes.c_ubyte * buf.len return buffer_type.from_address(buf.buf) finally: _PyBuffer_Release(byref(buf)) else: raise TypeError("Object type %s cannot be passed to C code" % type(data)) # --- class VoidPointer_ctypes(_VoidPointer): """Model a newly allocated pointer to void""" def __init__(self): self._p = c_void_p() def get(self): return self._p def address_of(self): return byref(self._p) def VoidPointer(): return VoidPointer_ctypes() backend = "ctypes" class SmartPointer(object): """Class to hold a 
non-managed piece of memory""" def __init__(self, raw_pointer, destructor): self._raw_pointer = raw_pointer self._destructor = destructor def get(self): return self._raw_pointer def release(self): rp, self._raw_pointer = self._raw_pointer, None return rp def __del__(self): try: if self._raw_pointer is not None: self._destructor(self._raw_pointer) self._raw_pointer = None except AttributeError: pass def load_ripemd_raw_lib(name, cdecl): """Load a shared library and return a handle to it. @name, the name of the library expressed as a ripemd module, for instance ripemd.ripemd160. @cdecl, the C function declarations. """ split = name.split(".") dir_comps, basename = split[:-1], split[-1] attempts = [] for ext in extension_suffixes: try: filename = basename + ext full_name = ripemd_filename(dir_comps, filename) if not os.path.isfile(full_name): attempts.append("Not found '%s'" % filename) continue return load_lib(full_name, cdecl) except OSError as exp: attempts.append("Cannot load '%s': %s" % (filename, str(exp))) raise OSError("Cannot load native module '%s': %s" % (name, ", ".join(attempts))) def is_buffer(x): """Return True if object x supports the buffer interface""" return isinstance(x, (bytes, bytearray, memoryview)) def is_writeable_buffer(x): return (isinstance(x, bytearray) or (isinstance(x, memoryview) and not x.readonly))
/ripemd-hash-1.0.1.tar.gz/ripemd-hash-1.0.1/lib/ripemd/_raw_api.py
0.52902
0.301092
_raw_api.py
pypi
from ripemd._raw_api import (load_ripemd_raw_lib, VoidPointer,
                             SmartPointer, create_string_buffer,
                             get_raw_buffer, c_size_t,
                             c_uint8_ptr)

# Handle to the native RIPEMD-160 implementation, with its C API declared.
_raw_ripemd160_lib = load_ripemd_raw_lib(
    "ripemd._ripemd160",
    """
    int ripemd160_init(void **shaState);
    int ripemd160_destroy(void *shaState);
    int ripemd160_update(void *hs, const uint8_t *buf, size_t len);
    int ripemd160_digest(const void *shaState, uint8_t digest[20]);
    int ripemd160_copy(const void *src, void *dst);
    """)


class RIPEMD160Hash(object):
    """A RIPEMD-160 hash object.
    Do not instantiate directly. Use the :func:`new` function.

    :ivar oid: ASN.1 Object ID
    :vartype oid: string

    :ivar block_size: the size in bytes of the internal message block,
                      input to the compression function
    :vartype block_size: integer

    :ivar digest_size: the size in bytes of the resulting hash
    :vartype digest_size: integer
    """

    # The size of the resulting hash in bytes.
    digest_size = 20
    # The internal block size of the hash algorithm in bytes.
    block_size = 64
    # ASN.1 Object ID
    oid = "1.3.36.3.2.1"

    def __init__(self, data=None):
        state = VoidPointer()
        result = _raw_ripemd160_lib.ripemd160_init(state.address_of())
        if result:
            raise ValueError("Error %d while instantiating RIPEMD160"
                             % result)
        # SmartPointer frees the native state when this object is collected.
        self._state = SmartPointer(state.get(),
                                   _raw_ripemd160_lib.ripemd160_destroy)
        if data:
            self.update(data)

    def update(self, data):
        """Continue hashing of a message by consuming the next chunk of data.

        Args:
            data (byte string/byte array/memoryview): The next chunk of the
              message being hashed.
        """

        result = _raw_ripemd160_lib.ripemd160_update(self._state.get(),
                                                     c_uint8_ptr(data),
                                                     c_size_t(len(data)))
        if result:
            # FIX: original message said "while instantiating" (copy-paste).
            raise ValueError("Error %d while hashing data with RIPEMD160"
                             % result)

    def digest(self):
        """Return the **binary** (non-printable) digest of the message that
        has been hashed so far.

        :return: The hash digest, computed over the data processed so far.
                 Binary form.
        :rtype: byte string
        """

        bfr = create_string_buffer(self.digest_size)
        result = _raw_ripemd160_lib.ripemd160_digest(self._state.get(),
                                                     bfr)
        if result:
            # FIX: original message said "while instantiating" (copy-paste).
            raise ValueError("Error %d while computing the RIPEMD160 digest"
                             % result)

        return get_raw_buffer(bfr)

    def copy(self):
        """Return a copy ("clone") of the hash object.

        The copy will have the same internal state as the original hash
        object.
        This can be used to efficiently compute the digests of strings that
        share a common initial substring.

        :return: A hash object of the same type
        """

        clone = RIPEMD160Hash()
        result = _raw_ripemd160_lib.ripemd160_copy(self._state.get(),
                                                   clone._state.get())
        if result:
            # FIX: original message said "while instantiating" (copy-paste).
            raise ValueError("Error %d while copying RIPEMD160" % result)
        return clone

    def new(self, data=None):
        """Create a fresh RIPEMD-160 hash object."""
        return RIPEMD160Hash(data)


def new(data=None):
    """Create a new hash object.

    :parameter data:
        Optional. The very first chunk of the message to hash.
        It is equivalent to an early call to :meth:`RIPEMD160Hash.update`.
    :type data: byte string/byte array/memoryview

    :Return: A :class:`RIPEMD160Hash` hash object
    """

    return RIPEMD160Hash().new(data)


def ripemd160(data):
    """Return the **binary** (non-printable) digest of ``data``.

    :return: The hash digest of data. Binary form.
    :rtype: byte string
    """

    return RIPEMD160Hash().new(data).digest()


# The size of the resulting hash in bytes.
digest_size = RIPEMD160Hash.digest_size

# The internal block size of the hash algorithm in bytes.
block_size = RIPEMD160Hash.block_size
/ripemd-hash-1.0.1.tar.gz/ripemd-hash-1.0.1/lib/ripemd/ripemd160.py
0.845465
0.341239
ripemd160.py
pypi
from __future__ import unicode_literals import colorfield.fields import djangocms_attributes_field.fields import django.core.validators import django.db.models.deletion from django.db import models, migrations class Migration(migrations.Migration): initial = True dependencies = [ ('filer', '0007_auto_20161016_1055'), ('cms', '0018_pagenode'), ] operations = [ migrations.CreateModel( name='CarouselPlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_carouselplugin', serialize=False, to='cms.CMSPlugin')), ('tile_height', models.PositiveSmallIntegerField(default=150, verbose_name='tile height')), ('tile_width', models.PositiveSmallIntegerField(default=180, verbose_name='tile height')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_carousel_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='The align of the carousel.', max_length=6, verbose_name='align')), ('theme_carousel_offset', models.PositiveSmallIntegerField(default=0, help_text='The offset of the carousel from the align sides.', verbose_name='offset')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('carousel_autoplay', models.BooleanField(default=True, help_text='Autoplay of the carousel on start.', verbose_name='carousel autoplay')), ('carousel_autoplay_direction', models.CharField(choices=[('left', 'Left'), ('right', 'Right')], default='right', help_text='Autoplay direction.', max_length=6, verbose_name='scroll easing')), ('carousel_autoplay_pause_onhover', models.BooleanField(default=True, help_text='Pause the autoplay on mouse over.', verbose_name='pause on hover')), ('carousel_autoplay_timeout', models.PositiveSmallIntegerField(default=3000, verbose_name='autoplay timeout')), ('carousel_navigation_numtiles', models.PositiveSmallIntegerField(default=3, help_text='Number of tiles to scroll when user clicks on next/prev button.', verbose_name='navigation numtiles')), ('carousel_padding', models.PositiveSmallIntegerField(default=8, help_text='Padding at the sides of the carousel.', verbose_name='padding')), ('carousel_scroll_duration', models.PositiveSmallIntegerField(default=500, help_text='Duration of scrolling to tile.', verbose_name='scroll duration')), ('carousel_scroll_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), 
('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutCubic', help_text='Easing of scrolling to tile animation.', max_length=17, verbose_name='scroll easing')), ('carousel_space_between_tiles', models.PositiveSmallIntegerField(default=20, verbose_name='space between tiles')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. 
If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), 
('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('theme_navigation_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='The align of the navigation.', max_length=6, verbose_name='navigation align')), ('theme_navigation_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='horizontal offset')), ('theme_space_between_arrows', models.PositiveSmallIntegerField(default=5, help_text='The space between arrows in the navigation.', verbose_name='space between arrows')), ('theme_enable_navigation', models.BooleanField(default=True, verbose_name='enable navigation')), ('theme_navigation_enable_play', models.BooleanField(default=True, help_text='enable / disable the play button of the navigation.', verbose_name='enable play')), ('theme_navigation_margin', models.PositiveSmallIntegerField(default=20, help_text='The space between the carousel and the navigation.', verbose_name='margin')), ('theme_navigation_position', models.CharField(choices=[('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The vertical position of the navigation reative to the carousel.', max_length=6, verbose_name='position')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', 
models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=True, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=True, help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), 
('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, 
validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. 
If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_unite_plugins_full', related_query_name='carousel_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Carousel plugin', 'verbose_name_plural': 'Carousel plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( name='CarouselSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tile_height', models.PositiveSmallIntegerField(default=150, verbose_name='tile height')), ('tile_width', models.PositiveSmallIntegerField(default=180, verbose_name='tile height')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_carousel_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='The align of the carousel.', max_length=6, verbose_name='align')), ('theme_carousel_offset', models.PositiveSmallIntegerField(default=0, help_text='The offset of the carousel from the align sides.', verbose_name='offset')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('carousel_autoplay', models.BooleanField(default=True, help_text='Autoplay of the carousel on start.', verbose_name='carousel autoplay')), ('carousel_autoplay_direction', models.CharField(choices=[('left', 'Left'), ('right', 'Right')], default='right', help_text='Autoplay direction.', max_length=6, verbose_name='scroll easing')), ('carousel_autoplay_pause_onhover', models.BooleanField(default=True, help_text='Pause the autoplay on mouse over.', verbose_name='pause on hover')), ('carousel_autoplay_timeout', models.PositiveSmallIntegerField(default=3000, verbose_name='autoplay timeout')), ('carousel_navigation_numtiles', models.PositiveSmallIntegerField(default=3, help_text='Number of tiles to scroll when user clicks on next/prev button.', verbose_name='navigation numtiles')), ('carousel_padding', models.PositiveSmallIntegerField(default=8, help_text='Padding at the sides of the carousel.', verbose_name='padding')), ('carousel_scroll_duration', models.PositiveSmallIntegerField(default=500, help_text='Duration of scrolling to tile.', verbose_name='scroll duration')), ('carousel_scroll_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), 
('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutCubic', help_text='Easing of scrolling to tile animation.', max_length=17, verbose_name='scroll easing')), ('carousel_space_between_tiles', models.PositiveSmallIntegerField(default=20, verbose_name='space between tiles')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. 
If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), 
('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('theme_navigation_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='The align of the navigation.', max_length=6, verbose_name='navigation align')), ('theme_navigation_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='horizontal offset')), ('theme_space_between_arrows', models.PositiveSmallIntegerField(default=5, help_text='The space between arrows in the navigation.', verbose_name='space between arrows')), ('theme_enable_navigation', models.BooleanField(default=True, verbose_name='enable navigation')), ('theme_navigation_enable_play', models.BooleanField(default=True, help_text='enable / disable the play button of the navigation.', verbose_name='enable play')), ('theme_navigation_margin', models.PositiveSmallIntegerField(default=20, help_text='The space between the carousel and the navigation.', verbose_name='margin')), ('theme_navigation_position', models.CharField(choices=[('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The vertical position of the navigation reative to the carousel.', max_length=6, verbose_name='position')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', 
models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=True, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=True, help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), 
('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, 
validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. 
If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Carousel unite options', 'verbose_name_plural': 'Carousel unite options', }, ), migrations.CreateModel( name='CompactThemePlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_compactthemeplugin', serialize=False, to='cms.CMSPlugin')), ('theme_hide_panel_under_width', models.PositiveSmallIntegerField(blank=True, default=480, help_text="Hide panel under certain browser width, if null, don't hide.", null=True, verbose_name='hide panel under width')), ('theme_panel_position', models.CharField(choices=[('top', 'Top'), ('bottom', 'Bottom'), ('left', 'Left'), ('right', 'Right')], default='bottom', help_text='Thumbs panel position.', max_length=6, verbose_name='panel position')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', 
models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), ('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), ('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. 
If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), ('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], 
default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, 
verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], default=False, verbose_name='progress pie type')), ('slider_progresspie_width', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', 
models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on 
media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), ('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='text panel vertical alignment')), ('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', 
models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide 
change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('thumb_border_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='border color')), ('thumb_border_effect', models.BooleanField(default=True, help_text='Specify if the thumb has border.', verbose_name='border effect')), ('thumb_border_width', models.PositiveSmallIntegerField(default=0, verbose_name='border width')), ('thumb_color_overlay_effect', models.BooleanField(default=True, help_text='Thumbnail color overlay effect, release the overlay on mouseover and selected states.', verbose_name='color overlay effect')), ('thumb_fixed_size', models.BooleanField(choices=[(True, 'Fixed'), (False, 'Dynamic')], default=True, help_text='Fixed/dynamic thumbnail width.', verbose_name='sizing method')), ('thumb_height', models.PositiveSmallIntegerField(default=50, verbose_name='height')), ('thumb_image_overlay_effect', models.BooleanField(default=False, help_text='Images overlay effect on normal state only.', verbose_name='image overlay effect')), ('thumb_image_overlay_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image overlay type')), ('thumb_loader_type', models.CharField(choices=[('dark', 'Dark'), ('light', 'Light')], default='dark', max_length=8, verbose_name='loader type')), ('thumb_overlay_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='overlay color')), ('thumb_overlay_opacity', models.PositiveSmallIntegerField(default=40, help_text='Thumbnail overlay color opacity.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('thumb_overlay_reverse', 
models.BooleanField(default=False, help_text='Reverse the overlay, will be shown on selected state only.', verbose_name='overlay reverse')), ('thumb_over_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in mouseover state.', max_length=18, verbose_name='mouseover border color')), ('thumb_over_border_width', models.PositiveSmallIntegerField(default=0, help_text='Thumbnail border width in mouseover state.', verbose_name='mouseover border width')), ('thumb_round_corners_radius', models.PositiveSmallIntegerField(default=0, verbose_name='border radius')), ('thumb_selected_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in selected state.', max_length=18, verbose_name='selected border color')), ('thumb_selected_border_width', models.PositiveSmallIntegerField(default=1, help_text='Thumbnail border width in selected state.', verbose_name='selected border width')), ('thumb_show_loader', models.BooleanField(default=True, help_text='Show thumb loader while loading the thumb.', verbose_name='show loader')), ('thumb_transition_duration', models.PositiveSmallIntegerField(default=200, help_text='Thumbnail effect transition duration.', verbose_name='transition duration')), ('thumb_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), 
('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutQuad', help_text='Thumb effect transition easing.', max_length=17, verbose_name='transition easing')), ('thumb_width', models.PositiveSmallIntegerField(default=88, verbose_name='width')), ('strippanel_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='background color of the strip wrapper, if not set, it will be taken from the CSS.', max_length=18, verbose_name='background color')), ('strippanel_buttons_role', models.CharField(choices=[('scroll_strip', 'Scroll strip'), ('advance_item', 'Advance item')], default='scroll_strip', help_text='Role of the side buttons.', max_length=12, verbose_name='buttons role')), ('strippanel_buttons_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the buttons, if empty inherit from gallery skin.', max_length=255, verbose_name='buttons skin')), ('strippanel_enable_buttons', models.BooleanField(default=False, help_text='Enable buttons from the sides of the panel.', verbose_name='enable buttons')), ('strippanel_enable_handle', models.BooleanField(default=True, verbose_name='enable handle')), ('strippanel_handle_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='top', help_text='Close handle tip alignment on the handle bar according panel orientation.', max_length=6, verbose_name='handle alignment')), ('strippanel_handle_offset', models.PositiveSmallIntegerField(default=0, help_text='Offset of handle bar according the valign.', 
verbose_name='handle offset')), ('strippanel_handle_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the handle, if empty inherit from gallery skin.', max_length=255, verbose_name='handle skin')), ('strippanel_padding_bottom', models.PositiveSmallIntegerField(default=8, help_text='Space from bottom of the panel.', verbose_name='bottom padding')), ('strippanel_padding_buttons', models.PositiveSmallIntegerField(default=2, help_text='Padding between the buttons and the panel.', verbose_name='buttons padding')), ('strippanel_padding_left', models.PositiveSmallIntegerField(default=0, help_text='Space from left of the panel.', verbose_name='left padding')), ('strippanel_padding_right', models.PositiveSmallIntegerField(default=0, help_text='Space from right of the panel.', verbose_name='right padding')), ('strippanel_padding_top', models.PositiveSmallIntegerField(default=8, help_text='Space from top of the panel.', verbose_name='top padding')), ('strip_control_avia', models.BooleanField(default=False, help_text='Move the strip according strip moseover position.', verbose_name='avia control')), ('strip_control_touch', models.BooleanField(default=True, help_text='Move the strip by dragging it.', verbose_name='touch control')), ('strip_scroll_to_thumb_duration', models.PositiveSmallIntegerField(default=500, help_text='Duration of scrolling to thumb.', verbose_name='scroll animation duration')), ('strip_scroll_to_thumb_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 
'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutCubic', help_text='Easing of scrolling to thumb animation.', max_length=17, verbose_name='scroll animation')), ('strip_space_between_thumbs', models.PositiveSmallIntegerField(default=6, verbose_name='space between thumbnails')), ('strip_thumbs_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Alignment of the thumbnails when smaller than the strip size.', max_length=6, verbose_name='thumbnail alignment')), ('strip_thumb_touch_sensetivity', models.PositiveSmallIntegerField(default=2, help_text='1: most sensetive; 100: least sensetive.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='touch sensitivity')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='compacttheme_unite_plugins_full', related_query_name='compacttheme_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Compact theme plugin', 'verbose_name_plural': 'Compact theme plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( 
name='CompactThemeSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('theme_hide_panel_under_width', models.PositiveSmallIntegerField(blank=True, default=480, help_text="Hide panel under certain browser width, if null, don't hide.", null=True, verbose_name='hide panel under width')), ('theme_panel_position', models.CharField(choices=[('top', 'Top'), ('bottom', 'Bottom'), ('left', 'Left'), ('right', 'Right')], default='bottom', help_text='Thumbs panel position.', max_length=6, verbose_name='panel position')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', 
models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), ('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), ('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. 
If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), ('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], 
default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, 
verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], default=False, verbose_name='progress pie type')), ('slider_progresspie_width', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', 
models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on 
media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), ('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='text panel vertical alignment')), ('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', 
models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide 
change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('thumb_border_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='border color')), ('thumb_border_effect', models.BooleanField(default=True, help_text='Specify if the thumb has border.', verbose_name='border effect')), ('thumb_border_width', models.PositiveSmallIntegerField(default=0, verbose_name='border width')), ('thumb_color_overlay_effect', models.BooleanField(default=True, help_text='Thumbnail color overlay effect, release the overlay on mouseover and selected states.', verbose_name='color overlay effect')), ('thumb_fixed_size', models.BooleanField(choices=[(True, 'Fixed'), (False, 'Dynamic')], default=True, help_text='Fixed/dynamic thumbnail width.', verbose_name='sizing method')), ('thumb_height', models.PositiveSmallIntegerField(default=50, verbose_name='height')), ('thumb_image_overlay_effect', models.BooleanField(default=False, help_text='Images overlay effect on normal state only.', verbose_name='image overlay effect')), ('thumb_image_overlay_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image overlay type')), ('thumb_loader_type', models.CharField(choices=[('dark', 'Dark'), ('light', 'Light')], default='dark', max_length=8, verbose_name='loader type')), ('thumb_overlay_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='overlay color')), ('thumb_overlay_opacity', models.PositiveSmallIntegerField(default=40, help_text='Thumbnail overlay color opacity.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('thumb_overlay_reverse', 
models.BooleanField(default=False, help_text='Reverse the overlay, will be shown on selected state only.', verbose_name='overlay reverse')), ('thumb_over_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in mouseover state.', max_length=18, verbose_name='mouseover border color')), ('thumb_over_border_width', models.PositiveSmallIntegerField(default=0, help_text='Thumbnail border width in mouseover state.', verbose_name='mouseover border width')), ('thumb_round_corners_radius', models.PositiveSmallIntegerField(default=0, verbose_name='border radius')), ('thumb_selected_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in selected state.', max_length=18, verbose_name='selected border color')), ('thumb_selected_border_width', models.PositiveSmallIntegerField(default=1, help_text='Thumbnail border width in selected state.', verbose_name='selected border width')), ('thumb_show_loader', models.BooleanField(default=True, help_text='Show thumb loader while loading the thumb.', verbose_name='show loader')), ('thumb_transition_duration', models.PositiveSmallIntegerField(default=200, help_text='Thumbnail effect transition duration.', verbose_name='transition duration')), ('thumb_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), 
('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutQuad', help_text='Thumb effect transition easing.', max_length=17, verbose_name='transition easing')), ('thumb_width', models.PositiveSmallIntegerField(default=88, verbose_name='width')), ('strippanel_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='background color of the strip wrapper, if not set, it will be taken from the CSS.', max_length=18, verbose_name='background color')), ('strippanel_buttons_role', models.CharField(choices=[('scroll_strip', 'Scroll strip'), ('advance_item', 'Advance item')], default='scroll_strip', help_text='Role of the side buttons.', max_length=12, verbose_name='buttons role')), ('strippanel_buttons_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the buttons, if empty inherit from gallery skin.', max_length=255, verbose_name='buttons skin')), ('strippanel_enable_buttons', models.BooleanField(default=False, help_text='Enable buttons from the sides of the panel.', verbose_name='enable buttons')), ('strippanel_enable_handle', models.BooleanField(default=True, verbose_name='enable handle')), ('strippanel_handle_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='top', help_text='Close handle tip alignment on the handle bar according panel orientation.', max_length=6, verbose_name='handle alignment')), ('strippanel_handle_offset', models.PositiveSmallIntegerField(default=0, help_text='Offset of handle bar according the valign.', 
verbose_name='handle offset')), ('strippanel_handle_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the handle, if empty inherit from gallery skin.', max_length=255, verbose_name='handle skin')), ('strippanel_padding_bottom', models.PositiveSmallIntegerField(default=8, help_text='Space from bottom of the panel.', verbose_name='bottom padding')), ('strippanel_padding_buttons', models.PositiveSmallIntegerField(default=2, help_text='Padding between the buttons and the panel.', verbose_name='buttons padding')), ('strippanel_padding_left', models.PositiveSmallIntegerField(default=0, help_text='Space from left of the panel.', verbose_name='left padding')), ('strippanel_padding_right', models.PositiveSmallIntegerField(default=0, help_text='Space from right of the panel.', verbose_name='right padding')), ('strippanel_padding_top', models.PositiveSmallIntegerField(default=8, help_text='Space from top of the panel.', verbose_name='top padding')), ('strip_control_avia', models.BooleanField(default=False, help_text='Move the strip according strip moseover position.', verbose_name='avia control')), ('strip_control_touch', models.BooleanField(default=True, help_text='Move the strip by dragging it.', verbose_name='touch control')), ('strip_scroll_to_thumb_duration', models.PositiveSmallIntegerField(default=500, help_text='Duration of scrolling to thumb.', verbose_name='scroll animation duration')), ('strip_scroll_to_thumb_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 
'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutCubic', help_text='Easing of scrolling to thumb animation.', max_length=17, verbose_name='scroll animation')), ('strip_space_between_thumbs', models.PositiveSmallIntegerField(default=6, verbose_name='space between thumbnails')), ('strip_thumbs_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Alignment of the thumbnails when smaller than the strip size.', max_length=6, verbose_name='thumbnail alignment')), ('strip_thumb_touch_sensetivity', models.PositiveSmallIntegerField(default=2, help_text='1: most sensetive; 100: least sensetive.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='touch sensitivity')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Compact theme unite options', 'verbose_name_plural': 'Compact theme unite options', }, ), migrations.CreateModel( name='DefaultThemePlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_defaultthemeplugin', serialize=False, to='cms.CMSPlugin')), ('theme_hide_panel_under_width', models.PositiveSmallIntegerField(blank=True, default=480, help_text="Hide panel under certain browser width, 
if null, don't hide.", null=True, verbose_name='hide panel under width')), ('theme_enable_fullscreen_button', models.BooleanField(default=True, help_text='Show, hide the theme fullscreen button. The position in the theme is constant.', verbose_name='enable fullscreen button')), ('theme_enable_hidepanel_button', models.BooleanField(default=True, help_text='Show, hide the hidepanel button.', verbose_name='enable hidepanel button')), ('theme_enable_play_button', models.BooleanField(default=True, help_text='Show, hide the theme play button. The position in the theme is constant.', verbose_name='enable play button')), ('theme_enable_text_panel', models.BooleanField(default=True, help_text='Enable the panel text panel.', verbose_name='enable text panel')), ('theme_text_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='The align of the text in the textpanel.', max_length=6, verbose_name='text align')), ('theme_text_padding_left', models.PositiveSmallIntegerField(default=20, help_text='Left padding of the text in the textpanel.', verbose_name='text padding left')), ('theme_text_padding_right', models.PositiveSmallIntegerField(default=5, help_text='Right padding of the text in the textpanel.', verbose_name='text padding right')), ('theme_text_type', models.CharField(choices=[('title', 'Title'), ('description', 'Description')], default='title', help_text='text that will be shown on the text panel, title or description.', max_length=6, verbose_name='text align')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, 
verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), ('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), ('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank 
inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), ('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal 
alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), 
('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], default=False, verbose_name='progress pie type')), ('slider_progresspie_width', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 
'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire 
slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. 
If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), ('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='text panel vertical alignment')), 
('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), 
('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('thumb_border_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='border color')), ('thumb_border_effect', models.BooleanField(default=True, help_text='Specify if the thumb has border.', verbose_name='border effect')), ('thumb_border_width', models.PositiveSmallIntegerField(default=0, verbose_name='border width')), ('thumb_color_overlay_effect', models.BooleanField(default=True, help_text='Thumbnail color overlay effect, release the overlay on mouseover and selected states.', verbose_name='color overlay effect')), ('thumb_fixed_size', models.BooleanField(choices=[(True, 'Fixed'), (False, 'Dynamic')], default=True, help_text='Fixed/dynamic thumbnail width.', verbose_name='sizing method')), ('thumb_height', models.PositiveSmallIntegerField(default=50, verbose_name='height')), ('thumb_image_overlay_effect', models.BooleanField(default=False, help_text='Images overlay effect on normal state only.', verbose_name='image overlay effect')), ('thumb_image_overlay_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image overlay type')), ('thumb_loader_type', models.CharField(choices=[('dark', 'Dark'), ('light', 'Light')], default='dark', max_length=8, verbose_name='loader type')), ('thumb_overlay_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='overlay color')), ('thumb_overlay_opacity', models.PositiveSmallIntegerField(default=40, help_text='Thumbnail overlay color opacity.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('thumb_overlay_reverse', 
models.BooleanField(default=False, help_text='Reverse the overlay, will be shown on selected state only.', verbose_name='overlay reverse')), ('thumb_over_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in mouseover state.', max_length=18, verbose_name='mouseover border color')), ('thumb_over_border_width', models.PositiveSmallIntegerField(default=0, help_text='Thumbnail border width in mouseover state.', verbose_name='mouseover border width')), ('thumb_round_corners_radius', models.PositiveSmallIntegerField(default=0, verbose_name='border radius')), ('thumb_selected_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in selected state.', max_length=18, verbose_name='selected border color')), ('thumb_selected_border_width', models.PositiveSmallIntegerField(default=1, help_text='Thumbnail border width in selected state.', verbose_name='selected border width')), ('thumb_show_loader', models.BooleanField(default=True, help_text='Show thumb loader while loading the thumb.', verbose_name='show loader')), ('thumb_transition_duration', models.PositiveSmallIntegerField(default=200, help_text='Thumbnail effect transition duration.', verbose_name='transition duration')), ('thumb_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), 
('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutQuad', help_text='Thumb effect transition easing.', max_length=17, verbose_name='transition easing')), ('thumb_width', models.PositiveSmallIntegerField(default=88, verbose_name='width')), ('strippanel_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='background color of the strip wrapper, if not set, it will be taken from the CSS.', max_length=18, verbose_name='background color')), ('strippanel_buttons_role', models.CharField(choices=[('scroll_strip', 'Scroll strip'), ('advance_item', 'Advance item')], default='scroll_strip', help_text='Role of the side buttons.', max_length=12, verbose_name='buttons role')), ('strippanel_buttons_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the buttons, if empty inherit from gallery skin.', max_length=255, verbose_name='buttons skin')), ('strippanel_enable_buttons', models.BooleanField(default=False, help_text='Enable buttons from the sides of the panel.', verbose_name='enable buttons')), ('strippanel_enable_handle', models.BooleanField(default=True, verbose_name='enable handle')), ('strippanel_handle_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='top', help_text='Close handle tip alignment on the handle bar according panel orientation.', max_length=6, verbose_name='handle alignment')), ('strippanel_handle_offset', models.PositiveSmallIntegerField(default=0, help_text='Offset of handle bar according the valign.', 
verbose_name='handle offset')), ('strippanel_handle_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the handle, if empty inherit from gallery skin.', max_length=255, verbose_name='handle skin')), ('strippanel_padding_bottom', models.PositiveSmallIntegerField(default=8, help_text='Space from bottom of the panel.', verbose_name='bottom padding')), ('strippanel_padding_buttons', models.PositiveSmallIntegerField(default=2, help_text='Padding between the buttons and the panel.', verbose_name='buttons padding')), ('strippanel_padding_left', models.PositiveSmallIntegerField(default=0, help_text='Space from left of the panel.', verbose_name='left padding')), ('strippanel_padding_right', models.PositiveSmallIntegerField(default=0, help_text='Space from right of the panel.', verbose_name='right padding')), ('strippanel_padding_top', models.PositiveSmallIntegerField(default=8, help_text='Space from top of the panel.', verbose_name='top padding')), ('strip_control_avia', models.BooleanField(default=False, help_text='Move the strip according strip moseover position.', verbose_name='avia control')), ('strip_control_touch', models.BooleanField(default=True, help_text='Move the strip by dragging it.', verbose_name='touch control')), ('strip_scroll_to_thumb_duration', models.PositiveSmallIntegerField(default=500, help_text='Duration of scrolling to thumb.', verbose_name='scroll animation duration')), ('strip_scroll_to_thumb_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 
'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutCubic', help_text='Easing of scrolling to thumb animation.', max_length=17, verbose_name='scroll animation')), ('strip_space_between_thumbs', models.PositiveSmallIntegerField(default=6, verbose_name='space between thumbnails')), ('strip_thumbs_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Alignment of the thumbnails when smaller than the strip size.', max_length=6, verbose_name='thumbnail alignment')), ('strip_thumb_touch_sensetivity', models.PositiveSmallIntegerField(default=2, help_text='1: most sensetive; 100: least sensetive.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='touch sensitivity')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='defaulttheme_unite_plugins_full', related_query_name='defaulttheme_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Default theme plugin', 'verbose_name_plural': 'Default theme plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( 
name='DefaultThemeSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('theme_hide_panel_under_width', models.PositiveSmallIntegerField(blank=True, default=480, help_text="Hide panel under certain browser width, if null, don't hide.", null=True, verbose_name='hide panel under width')), ('theme_enable_fullscreen_button', models.BooleanField(default=True, help_text='Show, hide the theme fullscreen button. The position in the theme is constant.', verbose_name='enable fullscreen button')), ('theme_enable_hidepanel_button', models.BooleanField(default=True, help_text='Show, hide the hidepanel button.', verbose_name='enable hidepanel button')), ('theme_enable_play_button', models.BooleanField(default=True, help_text='Show, hide the theme play button. The position in the theme is constant.', verbose_name='enable play button')), ('theme_enable_text_panel', models.BooleanField(default=True, help_text='Enable the panel text panel.', verbose_name='enable text panel')), ('theme_text_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='The align of the text in the textpanel.', max_length=6, verbose_name='text align')), ('theme_text_padding_left', models.PositiveSmallIntegerField(default=20, help_text='Left padding of the text in the textpanel.', verbose_name='text padding left')), ('theme_text_padding_right', models.PositiveSmallIntegerField(default=5, help_text='Right padding of the text in the textpanel.', verbose_name='text padding right')), ('theme_text_type', models.CharField(choices=[('title', 'Title'), ('description', 'Description')], default='title', help_text='text that will be shown on the text panel, title or description.', max_length=6, verbose_name='text align')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if 
blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), 
('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), ('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable 
the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), ('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', 
models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], 
default=False, verbose_name='progress pie type')), ('slider_progresspie_width', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, 
verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), 
('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), ('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 
'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='text panel vertical alignment')), ('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), 
('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('thumb_border_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='border color')), ('thumb_border_effect', models.BooleanField(default=True, help_text='Specify if the thumb has border.', verbose_name='border effect')), ('thumb_border_width', models.PositiveSmallIntegerField(default=0, verbose_name='border width')), ('thumb_color_overlay_effect', models.BooleanField(default=True, help_text='Thumbnail color overlay effect, release the overlay on mouseover and selected states.', verbose_name='color overlay effect')), ('thumb_fixed_size', models.BooleanField(choices=[(True, 'Fixed'), (False, 'Dynamic')], default=True, help_text='Fixed/dynamic thumbnail width.', verbose_name='sizing method')), ('thumb_height', models.PositiveSmallIntegerField(default=50, verbose_name='height')), ('thumb_image_overlay_effect', models.BooleanField(default=False, help_text='Images overlay effect on normal state only.', verbose_name='image overlay effect')), ('thumb_image_overlay_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image overlay type')), ('thumb_loader_type', models.CharField(choices=[('dark', 'Dark'), ('light', 'Light')], default='dark', max_length=8, verbose_name='loader type')), ('thumb_overlay_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='overlay color')), ('thumb_overlay_opacity', models.PositiveSmallIntegerField(default=40, help_text='Thumbnail overlay color opacity.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('thumb_overlay_reverse', 
models.BooleanField(default=False, help_text='Reverse the overlay, will be shown on selected state only.', verbose_name='overlay reverse')), ('thumb_over_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in mouseover state.', max_length=18, verbose_name='mouseover border color')), ('thumb_over_border_width', models.PositiveSmallIntegerField(default=0, help_text='Thumbnail border width in mouseover state.', verbose_name='mouseover border width')), ('thumb_round_corners_radius', models.PositiveSmallIntegerField(default=0, verbose_name='border radius')), ('thumb_selected_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in selected state.', max_length=18, verbose_name='selected border color')), ('thumb_selected_border_width', models.PositiveSmallIntegerField(default=1, help_text='Thumbnail border width in selected state.', verbose_name='selected border width')), ('thumb_show_loader', models.BooleanField(default=True, help_text='Show thumb loader while loading the thumb.', verbose_name='show loader')), ('thumb_transition_duration', models.PositiveSmallIntegerField(default=200, help_text='Thumbnail effect transition duration.', verbose_name='transition duration')), ('thumb_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), 
('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutQuad', help_text='Thumb effect transition easing.', max_length=17, verbose_name='transition easing')), ('thumb_width', models.PositiveSmallIntegerField(default=88, verbose_name='width')), ('strippanel_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='background color of the strip wrapper, if not set, it will be taken from the CSS.', max_length=18, verbose_name='background color')), ('strippanel_buttons_role', models.CharField(choices=[('scroll_strip', 'Scroll strip'), ('advance_item', 'Advance item')], default='scroll_strip', help_text='Role of the side buttons.', max_length=12, verbose_name='buttons role')), ('strippanel_buttons_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the buttons, if empty inherit from gallery skin.', max_length=255, verbose_name='buttons skin')), ('strippanel_enable_buttons', models.BooleanField(default=False, help_text='Enable buttons from the sides of the panel.', verbose_name='enable buttons')), ('strippanel_enable_handle', models.BooleanField(default=True, verbose_name='enable handle')), ('strippanel_handle_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='top', help_text='Close handle tip alignment on the handle bar according panel orientation.', max_length=6, verbose_name='handle alignment')), ('strippanel_handle_offset', models.PositiveSmallIntegerField(default=0, help_text='Offset of handle bar according the valign.', 
verbose_name='handle offset')), ('strippanel_handle_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the handle, if empty inherit from gallery skin.', max_length=255, verbose_name='handle skin')), ('strippanel_padding_bottom', models.PositiveSmallIntegerField(default=8, help_text='Space from bottom of the panel.', verbose_name='bottom padding')), ('strippanel_padding_buttons', models.PositiveSmallIntegerField(default=2, help_text='Padding between the buttons and the panel.', verbose_name='buttons padding')), ('strippanel_padding_left', models.PositiveSmallIntegerField(default=0, help_text='Space from left of the panel.', verbose_name='left padding')), ('strippanel_padding_right', models.PositiveSmallIntegerField(default=0, help_text='Space from right of the panel.', verbose_name='right padding')), ('strippanel_padding_top', models.PositiveSmallIntegerField(default=8, help_text='Space from top of the panel.', verbose_name='top padding')), ('strip_control_avia', models.BooleanField(default=False, help_text='Move the strip according strip moseover position.', verbose_name='avia control')), ('strip_control_touch', models.BooleanField(default=True, help_text='Move the strip by dragging it.', verbose_name='touch control')), ('strip_scroll_to_thumb_duration', models.PositiveSmallIntegerField(default=500, help_text='Duration of scrolling to thumb.', verbose_name='scroll animation duration')), ('strip_scroll_to_thumb_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 
'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutCubic', help_text='Easing of scrolling to thumb animation.', max_length=17, verbose_name='scroll animation')), ('strip_space_between_thumbs', models.PositiveSmallIntegerField(default=6, verbose_name='space between thumbnails')), ('strip_thumbs_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Alignment of the thumbnails when smaller than the strip size.', max_length=6, verbose_name='thumbnail alignment')), ('strip_thumb_touch_sensetivity', models.PositiveSmallIntegerField(default=2, help_text='1: most sensetive; 100: least sensetive.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='touch sensitivity')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Default theme unite options', 'verbose_name_plural': 'Default theme unite options', }, ), migrations.CreateModel( name='GridThemePlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_gridthemeplugin', serialize=False, to='cms.CMSPlugin')), ('theme_hide_panel_under_width', models.PositiveSmallIntegerField(blank=True, default=480, help_text="Hide panel under certain browser width, if 
null, don't hide.", null=True, verbose_name='hide panel under width')), ('theme_panel_position', models.CharField(choices=[('top', 'Top'), ('bottom', 'Bottom'), ('left', 'Left'), ('right', 'Right')], default='right', help_text='Thumbs panel position.', max_length=6, verbose_name='panel position')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 
'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), ('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), ('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', 
models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), ('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', 
models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, 
verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], default=False, verbose_name='progress pie type')), ('slider_progresspie_width', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make 
the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the 
title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), 
('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='text panel vertical alignment')), ('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', 
help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('thumb_border_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='border color')), ('thumb_border_effect', models.BooleanField(default=True, help_text='Specify if the thumb has border.', verbose_name='border effect')), ('thumb_border_width', models.PositiveSmallIntegerField(default=0, verbose_name='border width')), ('thumb_color_overlay_effect', models.BooleanField(default=True, help_text='Thumbnail color overlay effect, release the overlay on mouseover and selected states.', verbose_name='color overlay effect')), ('thumb_fixed_size', models.BooleanField(choices=[(True, 'Fixed'), (False, 'Dynamic')], default=True, help_text='Fixed/dynamic thumbnail width.', verbose_name='sizing method')), ('thumb_height', models.PositiveSmallIntegerField(default=50, verbose_name='height')), ('thumb_image_overlay_effect', models.BooleanField(default=False, help_text='Images overlay effect on normal state only.', verbose_name='image overlay effect')), ('thumb_image_overlay_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image overlay type')), ('thumb_loader_type', models.CharField(choices=[('dark', 'Dark'), ('light', 'Light')], default='dark', max_length=8, verbose_name='loader type')), ('thumb_overlay_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='overlay color')), ('thumb_overlay_opacity', models.PositiveSmallIntegerField(default=40, help_text='Thumbnail overlay color opacity.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('thumb_overlay_reverse', 
models.BooleanField(default=False, help_text='Reverse the overlay, will be shown on selected state only.', verbose_name='overlay reverse')), ('thumb_over_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in mouseover state.', max_length=18, verbose_name='mouseover border color')), ('thumb_over_border_width', models.PositiveSmallIntegerField(default=0, help_text='Thumbnail border width in mouseover state.', verbose_name='mouseover border width')), ('thumb_round_corners_radius', models.PositiveSmallIntegerField(default=0, verbose_name='border radius')), ('thumb_selected_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in selected state.', max_length=18, verbose_name='selected border color')), ('thumb_selected_border_width', models.PositiveSmallIntegerField(default=1, help_text='Thumbnail border width in selected state.', verbose_name='selected border width')), ('thumb_show_loader', models.BooleanField(default=True, help_text='Show thumb loader while loading the thumb.', verbose_name='show loader')), ('thumb_transition_duration', models.PositiveSmallIntegerField(default=200, help_text='Thumbnail effect transition duration.', verbose_name='transition duration')), ('thumb_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), 
('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutQuad', help_text='Thumb effect transition easing.', max_length=17, verbose_name='transition easing')), ('thumb_width', models.PositiveSmallIntegerField(default=88, verbose_name='width')), ('gridpanel_arrows_align_hor', models.CharField(choices=[('borders', 'Borders'), ('center', 'Center'), ('grid', 'Grid')], default='center', help_text='Horizontal alignment of arrows, to the left and right borders, to the grid, or in the center space.', max_length=7, verbose_name='arrows horizontal alignment')), ('gridpanel_arrows_align_vert', models.CharField(choices=[('borders', 'Borders'), ('middle', 'Middle'), ('grid', 'Grid')], default='middle', help_text='Vertical alignment of arrows, to the left and right borders, to the grid, or in the center space.', max_length=7, verbose_name='arrows vertical alignment')), ('gridpanel_arrows_always_on', models.BooleanField(default=False, help_text='Always show arrows even if the grid is one pane only.', verbose_name='arrows always on')), ('gridpanel_arrows_padding_hor', models.PositiveSmallIntegerField(default=10, help_text='In case of horizontal type only, minimal size from the grid in case of "borders" and size from the grid in case of "grid".', verbose_name='horizontal arrows padding')), ('gridpanel_arrows_padding_vert', models.PositiveSmallIntegerField(default=4, help_text='Padding between the arrows and the grid, in case of "middle" align, there will be minimal padding.', verbose_name='vertical arrows padding')), ('gridpanel_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', 
help_text='Skin of the arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('gridpanel_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='Background color of the grid wrapper, if not set, it will be taken from the CSS.', max_length=18, verbose_name='background color')), ('gridpanel_enable_handle', models.BooleanField(default=True, verbose_name='enable handle')), ('gridpanel_grid_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='middle', max_length=6, verbose_name='grid panel alignment')), ('gridpanel_handle_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='top', help_text='Close handle tip align on the handle bar according panel orientation.', max_length=6, verbose_name='handle alignment')), ('gridpanel_handle_offset', models.PositiveSmallIntegerField(default=0, help_text='Offset of handle bar according the vertical align.', verbose_name='handle offset')), ('gridpanel_handle_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the handle, if blank inherit from gallery skin.', max_length=255, verbose_name='handle')), ('gridpanel_padding_border_bottom', models.PositiveSmallIntegerField(default=4, help_text='Padding between the bottom border of the panel.', verbose_name='bottom padding')), ('gridpanel_padding_border_left', models.PositiveSmallIntegerField(default=10, help_text='Padding between the left border of the panel.', verbose_name='left padding')), ('gridpanel_padding_border_right', models.PositiveSmallIntegerField(default=10, help_text='Padding between the left border of the panel.', verbose_name='right padding')), ('gridpanel_padding_border_top', models.PositiveSmallIntegerField(default=4, 
help_text='Padding between the top border of the panel.', verbose_name='top padding')), ('gridpanel_space_between_arrows', models.PositiveSmallIntegerField(default=20, help_text='On horizontal grids only.', verbose_name='space between arrows')), ('gridpanel_vertical_scroll', models.BooleanField(choices=[(True, 'Vertical'), (False, 'Horizontal')], default=True, help_text='Vertical or horizontal grid scroll and arrows.', verbose_name='scroll direction')), ('grid_carousel', models.BooleanField(default=False, help_text='Next pane goes to first when on last.', verbose_name='carousel')), ('grid_num_cols', models.PositiveSmallIntegerField(default=2, help_text='Number of grid columns.', verbose_name='columns')), ('grid_panes_direction', models.CharField(choices=[('left', 'Left'), ('bottom', 'Bottom')], default='left', help_text='Where panes will move', max_length=6, verbose_name='panes direction')), ('grid_space_between_cols', models.PositiveSmallIntegerField(default=10, verbose_name='space between columns')), ('grid_space_between_rows', models.PositiveSmallIntegerField(default=10, verbose_name='space between rows')), ('grid_transition_duration', models.PositiveSmallIntegerField(default=300, help_text='Transition of the panes change duration.', verbose_name='transition duration (ms)')), ('grid_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 
'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Easing function for the pane change transition.', max_length=17, verbose_name='transition easing')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gridtheme_unite_plugins_full', related_query_name='gridtheme_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Grid theme plugin', 'verbose_name_plural': 'Grid theme plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( name='GridThemeSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('theme_hide_panel_under_width', models.PositiveSmallIntegerField(blank=True, default=480, help_text="Hide panel under certain browser width, if null, don't hide.", null=True, verbose_name='hide panel under width')), ('theme_panel_position', models.CharField(choices=[('top', 'Top'), ('bottom', 'Bottom'), ('left', 'Left'), ('right', 'Right')], default='right', help_text='Thumbs panel position.', max_length=6, verbose_name='panel position')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', 
models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), ('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), 
('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), 
('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], 
default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], default=False, verbose_name='progress pie type')), ('slider_progresspie_width', 
models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', 
models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, 
default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), ('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', 
max_length=6, verbose_name='text panel vertical alignment')), ('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 
'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('thumb_border_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='border color')), ('thumb_border_effect', models.BooleanField(default=True, help_text='Specify if the thumb has border.', verbose_name='border effect')), ('thumb_border_width', models.PositiveSmallIntegerField(default=0, verbose_name='border width')), ('thumb_color_overlay_effect', models.BooleanField(default=True, help_text='Thumbnail color overlay effect, release the overlay on mouseover and selected states.', verbose_name='color overlay effect')), ('thumb_fixed_size', models.BooleanField(choices=[(True, 'Fixed'), (False, 'Dynamic')], default=True, help_text='Fixed/dynamic thumbnail width.', verbose_name='sizing method')), ('thumb_height', models.PositiveSmallIntegerField(default=50, verbose_name='height')), ('thumb_image_overlay_effect', models.BooleanField(default=False, help_text='Images overlay effect on normal state only.', verbose_name='image overlay effect')), ('thumb_image_overlay_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image overlay type')), ('thumb_loader_type', models.CharField(choices=[('dark', 'Dark'), ('light', 'Light')], default='dark', max_length=8, verbose_name='loader type')), ('thumb_overlay_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='overlay color')), ('thumb_overlay_opacity', models.PositiveSmallIntegerField(default=40, help_text='Thumbnail overlay color opacity.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('thumb_overlay_reverse', 
models.BooleanField(default=False, help_text='Reverse the overlay, will be shown on selected state only.', verbose_name='overlay reverse')), ('thumb_over_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in mouseover state.', max_length=18, verbose_name='mouseover border color')), ('thumb_over_border_width', models.PositiveSmallIntegerField(default=0, help_text='Thumbnail border width in mouseover state.', verbose_name='mouseover border width')), ('thumb_round_corners_radius', models.PositiveSmallIntegerField(default=0, verbose_name='border radius')), ('thumb_selected_border_color', colorfield.fields.ColorField(default='#D9D9D9', help_text='Thumbnail border color in selected state.', max_length=18, verbose_name='selected border color')), ('thumb_selected_border_width', models.PositiveSmallIntegerField(default=1, help_text='Thumbnail border width in selected state.', verbose_name='selected border width')), ('thumb_show_loader', models.BooleanField(default=True, help_text='Show thumb loader while loading the thumb.', verbose_name='show loader')), ('thumb_transition_duration', models.PositiveSmallIntegerField(default=200, help_text='Thumbnail effect transition duration.', verbose_name='transition duration')), ('thumb_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), 
('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeOutQuad', help_text='Thumb effect transition easing.', max_length=17, verbose_name='transition easing')), ('thumb_width', models.PositiveSmallIntegerField(default=88, verbose_name='width')), ('gridpanel_arrows_align_hor', models.CharField(choices=[('borders', 'Borders'), ('center', 'Center'), ('grid', 'Grid')], default='center', help_text='Horizontal alignment of arrows, to the left and right borders, to the grid, or in the center space.', max_length=7, verbose_name='arrows horizontal alignment')), ('gridpanel_arrows_align_vert', models.CharField(choices=[('borders', 'Borders'), ('middle', 'Middle'), ('grid', 'Grid')], default='middle', help_text='Vertical alignment of arrows, to the left and right borders, to the grid, or in the center space.', max_length=7, verbose_name='arrows vertical alignment')), ('gridpanel_arrows_always_on', models.BooleanField(default=False, help_text='Always show arrows even if the grid is one pane only.', verbose_name='arrows always on')), ('gridpanel_arrows_padding_hor', models.PositiveSmallIntegerField(default=10, help_text='In case of horizontal type only, minimal size from the grid in case of "borders" and size from the grid in case of "grid".', verbose_name='horizontal arrows padding')), ('gridpanel_arrows_padding_vert', models.PositiveSmallIntegerField(default=4, help_text='Padding between the arrows and the grid, in case of "middle" align, there will be minimal padding.', verbose_name='vertical arrows padding')), ('gridpanel_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', 
help_text='Skin of the arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('gridpanel_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='Background color of the grid wrapper, if not set, it will be taken from the CSS.', max_length=18, verbose_name='background color')), ('gridpanel_enable_handle', models.BooleanField(default=True, verbose_name='enable handle')), ('gridpanel_grid_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='middle', max_length=6, verbose_name='grid panel alignment')), ('gridpanel_handle_align', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom'), ('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='top', help_text='Close handle tip align on the handle bar according panel orientation.', max_length=6, verbose_name='handle alignment')), ('gridpanel_handle_offset', models.PositiveSmallIntegerField(default=0, help_text='Offset of handle bar according the vertical align.', verbose_name='handle offset')), ('gridpanel_handle_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the handle, if blank inherit from gallery skin.', max_length=255, verbose_name='handle')), ('gridpanel_padding_border_bottom', models.PositiveSmallIntegerField(default=4, help_text='Padding between the bottom border of the panel.', verbose_name='bottom padding')), ('gridpanel_padding_border_left', models.PositiveSmallIntegerField(default=10, help_text='Padding between the left border of the panel.', verbose_name='left padding')), ('gridpanel_padding_border_right', models.PositiveSmallIntegerField(default=10, help_text='Padding between the left border of the panel.', verbose_name='right padding')), ('gridpanel_padding_border_top', models.PositiveSmallIntegerField(default=4, 
help_text='Padding between the top border of the panel.', verbose_name='top padding')), ('gridpanel_space_between_arrows', models.PositiveSmallIntegerField(default=20, help_text='On horizontal grids only.', verbose_name='space between arrows')), ('gridpanel_vertical_scroll', models.BooleanField(choices=[(True, 'Vertical'), (False, 'Horizontal')], default=True, help_text='Vertical or horizontal grid scroll and arrows.', verbose_name='scroll direction')), ('grid_carousel', models.BooleanField(default=False, help_text='Next pane goes to first when on last.', verbose_name='carousel')), ('grid_num_cols', models.PositiveSmallIntegerField(default=2, help_text='Number of grid columns.', verbose_name='columns')), ('grid_panes_direction', models.CharField(choices=[('left', 'Left'), ('bottom', 'Bottom')], default='left', help_text='Where panes will move', max_length=6, verbose_name='panes direction')), ('grid_space_between_cols', models.PositiveSmallIntegerField(default=10, verbose_name='space between columns')), ('grid_space_between_rows', models.PositiveSmallIntegerField(default=10, verbose_name='space between rows')), ('grid_transition_duration', models.PositiveSmallIntegerField(default=300, help_text='Transition of the panes change duration.', verbose_name='transition duration (ms)')), ('grid_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 
'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Easing function for the pane change transition.', max_length=17, verbose_name='transition easing')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Grid theme unite options', 'verbose_name_plural': 'Grid theme unite options', }, ), migrations.CreateModel( name='SliderPlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_sliderplugin', serialize=False, to='cms.CMSPlugin')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 'Left'), 
('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), ('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), ('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. 
If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), ('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], 
default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, 
verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], default=False, verbose_name='progress pie type')), ('slider_progresspie_width', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', 
models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on 
media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), ('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='text panel vertical alignment')), ('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', 
models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide 
change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='slider_unite_plugins_full', related_query_name='slider_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Slider plugin', 'verbose_name_plural': 'Slider plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( name='SliderSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('slider_arrows_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider arrows, if blank inherit from gallery skin.', max_length=255, verbose_name='arrows skin')), ('slider_arrow_left_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Left arrow horizonal alignment.', max_length=6, verbose_name='left arrow alignment')), ('slider_arrow_left_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='left arrow vertical alignment')), ('slider_arrow_left_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='left arrow horizontal offset')), ('slider_arrow_left_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='left arrow vertical offset')), ('slider_arrow_right_align_hor', models.CharField(choices=[('left', 
'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Right arrow horizonal alignment.', max_length=6, verbose_name='Right arrow alignment')), ('slider_arrow_right_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='light arrow vertical alignment')), ('slider_arrow_right_offset_hor', models.PositiveSmallIntegerField(default=20, verbose_name='right arrow horizontal offset')), ('slider_arrow_right_offset_vert', models.PositiveSmallIntegerField(default=0, verbose_name='right arrow vertical offset')), ('slider_bullets_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Bullets horizontal alignment.', max_length=6, verbose_name='bullets alignment')), ('slider_bullets_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='bottom', max_length=6, verbose_name='bullets vertical alignment')), ('slider_bullets_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='bullets horizontal offset')), ('slider_bullets_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='bullets vertical offset')), ('slider_bullets_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the bullets, if blank inherit from gallery skin.', max_length=255, verbose_name='bullets skin')), ('slider_bullets_space_between', models.SmallIntegerField(blank=True, default=None, help_text='Set the space between bullets. 
If blank then will be set default space from the skins.', null=True, verbose_name='space between bullets')), ('slider_controls_always_on', models.BooleanField(default=True, help_text='Controls are always on, false - show only on mouseover.', verbose_name='controls always on')), ('slider_controls_appear_duration', models.PositiveSmallIntegerField(default=300, help_text='The duration of appearing controls.', verbose_name='controls appear duration')), ('slider_controls_appear_ontap', models.BooleanField(default=True, help_text='Appear controls on tap event on touch devices.', verbose_name='controls appear on tap')), ('slider_control_swipe', models.BooleanField(default=True, verbose_name='enable swiping control')), ('slider_control_zoom', models.BooleanField(default=True, verbose_name='enable zooming control')), ('slider_enable_arrows', models.BooleanField(default=True, verbose_name='enable arrows')), ('slider_enable_bullets', models.BooleanField(default=False, verbose_name='enable bullets')), ('slider_enable_fullscreen_button', models.BooleanField(default=True, verbose_name='enable fullscreen button')), ('slider_enable_play_button', models.BooleanField(default=True, verbose_name='enable play button')), ('slider_enable_progress_indicator', models.BooleanField(default=True, verbose_name='enable progress indicator')), ('slider_enable_text_panel', models.BooleanField(default=False, verbose_name='enable text panel')), ('slider_enable_zoom_panel', models.BooleanField(default=True, help_text='Enable the zoom buttons, works together with zoom control.', verbose_name='enable zoom panel')), ('slider_fullscreen_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Fullscreen button horizonatal alignment.', max_length=6, verbose_name='fullscreen button alignment')), ('slider_fullscreen_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], 
default='top', max_length=6, verbose_name='fullscreen button vertical alignment')), ('slider_fullscreen_button_offset_hor', models.PositiveSmallIntegerField(default=11, verbose_name='fullscreen button horizontal offset')), ('slider_fullscreen_button_offset_vert', models.PositiveSmallIntegerField(default=9, verbose_name='fullscreen button vertical offset')), ('slider_fullscreen_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider fullscreen button, if empty inherit from gallery skin.', max_length=255, verbose_name='fullscreen button skin')), ('slider_item_padding_bottom', models.PositiveSmallIntegerField(default=0, verbose_name='slider item bottom padding')), ('slider_item_padding_left', models.PositiveSmallIntegerField(default=0, verbose_name='slider item left padding')), ('slider_item_padding_right', models.PositiveSmallIntegerField(default=0, verbose_name='slider item right padding')), ('slider_item_padding_top', models.PositiveSmallIntegerField(default=0, verbose_name='slider item top padding')), ('slider_loader_color', models.CharField(choices=[('white', 'White'), ('black', 'Black')], default='white', max_length=6, verbose_name='loader color')), ('slider_loader_type', models.PositiveSmallIntegerField(choices=[(1, 'Shape 1'), (2, 'Shape 2'), (3, 'Shape 3'), (4, 'Shape 4'), (5, 'Shape 5'), (6, 'Shape 6'), (7, 'Shape 7')], default=1, verbose_name='shape of the loader')), ('slider_play_button_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Play button horizonatal alignment.', max_length=6, verbose_name='play button alignment')), ('slider_play_button_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='play button vertical alignment')), ('slider_play_button_offset_hor', models.PositiveSmallIntegerField(default=40, 
verbose_name='play button horizontal offset')), ('slider_play_button_offset_vert', models.PositiveSmallIntegerField(default=8, verbose_name='play button vertical offset')), ('slider_play_button_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider play button, if empty inherit from gallery skin.', max_length=255, verbose_name='play button skin')), ('slider_progressbar_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='progress bar color')), ('slider_progressbar_line_width', models.PositiveSmallIntegerField(default=5, verbose_name='progress bar line width')), ('slider_progressbar_opacity', models.PositiveSmallIntegerField(default=60, help_text='The opacity of the progress bar.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='progress bar opacity (%)')), ('slider_progresspie_color1', colorfield.fields.ColorField(default='#B5B5B5', max_length=18, verbose_name='progress pie first color')), ('slider_progresspie_color2', colorfield.fields.ColorField(default='#E5E5E5', max_length=18, verbose_name='progress pie second color')), ('slider_progresspie_height', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie height')), ('slider_progresspie_stroke_width', models.PositiveSmallIntegerField(default=6, verbose_name='progress pie stroke width')), ('slider_progresspie_type_fill', models.BooleanField(choices=[(False, 'Stroke'), (True, 'Fill')], default=False, verbose_name='progress pie type')), ('slider_progresspie_width', models.PositiveSmallIntegerField(default=30, verbose_name='progress pie width')), ('slider_progress_indicator_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='left', help_text='Progress indicator horizonatal alignment.', max_length=6, verbose_name='progress indicator alignment')), ('slider_progress_indicator_align_vert', 
models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='progress indicator vertical alignment')), ('slider_progress_indicator_offset_hor', models.PositiveSmallIntegerField(default=16, verbose_name='progress indicator horizontal offset ')), ('slider_progress_indicator_offset_vert', models.PositiveSmallIntegerField(default=36, verbose_name='progress indicator vertical offset ')), ('slider_progress_indicator_type', models.CharField(choices=[('pie', 'Pie'), ('pie2', 'Pie 2'), ('bar', 'Bar')], default='pie', help_text='if pie not supported, it will switch to bar automatically.', max_length=4, verbose_name='progress indicator type')), ('slider_scale_mode', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode')), ('slider_scale_mode_fullscreen', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='down', max_length=4, verbose_name='fullscreen scale mode')), ('slider_scale_mode_media', models.CharField(choices=[('fit', 'scale down and up the image to always fit the slider'), ('down', "scale down only, smaller images will be shown, don't enlarge images (scale up)"), ('fill', 'fill the entire slider space by scaling, cropping and centering the image'), ('fitvert', 'make the image always fill the vertical slider area')], default='fill', max_length=4, verbose_name='scale mode on 
media items')), ('slider_textpanel_always_on', models.BooleanField(default=True, help_text='Text panel are always on or show only on mouseover.', verbose_name='text panel always on')), ('slider_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='text panel background color')), ('slider_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, verbose_name='text panel background CSS')), ('slider_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='text panel background opacity (%)')), ('slider_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='text panel description CSS')), ('slider_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='text panel title CSS')), ('slider_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('slider_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel description text color')), ('slider_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('slider_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('slider_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. 
If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_textpanel_enable_bg', models.BooleanField(default=True, help_text='Enable the textpanel background.', verbose_name='enable background')), ('slider_textpanel_enable_description', models.BooleanField(default=True, help_text='Enable the description text.', verbose_name='enable description')), ('slider_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('slider_textpanel_fade_duration', models.PositiveSmallIntegerField(default=200, verbose_name='text panel fade duration ')), ('slider_textpanel_height', models.PositiveSmallIntegerField(blank=True, default=None, help_text='If blank it will be set dynamically.', null=True, verbose_name='text panel height ')), ('slider_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=10, verbose_name='text panel bottom padding')), ('slider_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, verbose_name='text panel left padding')), ('slider_textpanel_padding_right', models.PositiveSmallIntegerField(default=10, verbose_name='text panel right padding')), ('slider_textpanel_padding_title_description', models.PositiveSmallIntegerField(default=5, help_text='the space between the title and the description.', verbose_name='text panel title padding')), ('slider_textpanel_padding_top', models.PositiveSmallIntegerField(default=10, verbose_name='text panel top padding')), ('slider_textpanel_text_valign', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='middle', max_length=6, verbose_name='text panel vertical alignment')), ('slider_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold title')), ('slider_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='text panel title text color')), ('slider_textpanel_title_font_family', 
models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('slider_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='title font size (px)')), ('slider_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('slider_transition', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='The transition of the slide change.', max_length=5, verbose_name='transition')), ('slider_transition_easing', models.CharField(choices=[('easeInOutBack', 'easeInOutBack'), ('easeInOutBounce', 'easeInOutBounce'), ('easeInOutCirc', 'easeInOutCirc'), ('easeInOutCubic', 'easeInOutCubic'), ('easeInOutElastic', 'easeInOutElastic'), ('easeInOutExpo', 'easeInOutExpo'), ('easeInOutQuad', 'easeInOutQuad'), ('easeInOutQuart', 'easeInOutQuart'), ('easeInOutQuint', 'easeInOutQuint'), ('easeInOutSine', 'easeInOutSine'), ('easeInBack', 'easeInBack'), ('easeInBounce', 'easeInBounce'), ('easeInCirc', 'easeInCirc'), ('easeInCubic', 'easeInCubic'), ('easeInElastic', 'easeInElastic'), ('easeInExpo', 'easeInExpo'), ('easeInQuad', 'easeInQuad'), ('easeInQuart', 'easeInQuart'), ('easeInQuint', 'easeInQuint'), ('easeInSine', 'easeInSine'), ('easeOutBack', 'easeOutBack'), ('easeOutBounce', 'easeOutBounce'), ('easeOutCirc', 'easeOutCirc'), ('easeOutCubic', 'easeOutCubic'), ('easeOutElastic', 'easeOutElastic'), ('easeOutExpo', 'easeOutExpo'), ('easeOutQuad', 'easeOutQuad'), ('easeOutQuart', 'easeOutQuart'), ('easeOutQuint', 'easeOutQuint'), ('easeOutSine', 'easeOutSine'), ('swing', 'swing')], default='easeInOutQuad', help_text='Transition easing function of slide 
change.', max_length=17, verbose_name='transition easing')), ('slider_transition_speed', models.PositiveSmallIntegerField(default=300, help_text='Transition duration of slide change.', verbose_name='transition speed')), ('slider_videoplay_button_type', models.CharField(choices=[('square', 'Square'), ('round', 'Round')], default='square', max_length=6, verbose_name='videoplay button type')), ('slider_zoompanel_align_hor', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='right', help_text='Zoom panel horizonatal alignment.', max_length=6, verbose_name='zoom panel alignment')), ('slider_zoompanel_align_vert', models.CharField(choices=[('top', 'Top'), ('middle', 'Middle'), ('bottom', 'Bottom')], default='top', max_length=6, verbose_name='zoom panel vertical alignment')), ('slider_zoompanel_offset_hor', models.PositiveSmallIntegerField(default=12, verbose_name='zoom panel horizontal offset')), ('slider_zoompanel_offset_vert', models.PositiveSmallIntegerField(default=10, verbose_name='zoom panel vertical offset')), ('slider_zoompanel_skin', models.CharField(blank=True, choices=[('default', 'Default'), ('alexis', 'Alexis')], default='', help_text='Skin of the slider zoom panel, if empty inherit from gallery skin.', max_length=255, verbose_name='zoom panel skin')), ('slider_zoom_max_ratio', models.PositiveSmallIntegerField(default=6, verbose_name='maximum zoom ratio')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=400, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_skin', models.CharField(choices=[('default', 'Default'), ('alexis', 'Alexis')], max_length=255, verbose_name='gallery skin')), ('gallery_width', models.PositiveSmallIntegerField(default=900, help_text='Gallery width.', verbose_name='width')), ('gallery_autoplay', models.BooleanField(default=False, help_text='Begin slideshow autoplay on start.', verbose_name='gallery autoplay')), ('gallery_carousel', models.BooleanField(default=True, help_text='Next button on last image goes to first image.', verbose_name='carousel')), ('gallery_control_keyboard', models.BooleanField(default=True, help_text='Enable / disable keyboard controls.', verbose_name='keyboard')), ('gallery_control_thumbs_mousewheel', models.BooleanField(default=False, help_text='Enable / disable the mousewheel.', verbose_name='mousewheel')), ('gallery_debug_errors', models.BooleanField(default=True, help_text='show error message when there is some error on the gallery area.', verbose_name='debug errors')), ('gallery_height', models.PositiveSmallIntegerField(default=500, help_text='Gallery height.', verbose_name='height')), ('gallery_images_preload_type', models.CharField(choices=[('all', 'load all the images first time'), ('minimal', 'only image nabours will be loaded each time'), ('visible', 'visible thumbs images will be loaded each time')], default='minimal', help_text='Preload type of the images.', max_length=8, verbose_name='preload type')), ('gallery_min_height', models.PositiveSmallIntegerField(default=300, help_text='Gallery minimal height when resizing.', verbose_name='minimum height')), ('gallery_pause_on_mouseover', models.BooleanField(default=False, help_text='Pause on mouseover when playing slideshow.', verbose_name='pause on mouseover')), 
('gallery_play_interval', models.PositiveSmallIntegerField(default=3000, help_text='Play interval of the slideshow.', verbose_name='play interval')), ('gallery_preserve_ratio', models.BooleanField(default=True, help_text='Preserve aspect ratio on window resize.', verbose_name='carousel')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Slider unite options', 'verbose_name_plural': 'Slider unite options', }, ), migrations.CreateModel( name='TilesColumnsPlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_tilescolumnsplugin', serialize=False, to='cms.CMSPlugin')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. 
If some number gived, like 0.', null=True, verbose_name='auto open')), ('theme_enable_preloader', models.BooleanField(default=True, help_text='Enable preloader circle.', verbose_name='enable preloader')), ('theme_preloading_height', models.PositiveSmallIntegerField(default=200, help_text='The height of the preloading div, it is shown before the gallery.', verbose_name='preloading height')), ('theme_preloader_vertpos', models.PositiveSmallIntegerField(default=100, help_text='The vertical position of the preloader.', verbose_name='preloader vertical position')), ('theme_appearance_order', models.CharField(choices=[('normal', 'Normal'), ('shuffle', 'Shuffle'), ('keep', 'Keep order')], default='normal', help_text='The appearance order of the tiles.', max_length=8, verbose_name='appearance order')), ('tiles_enable_transition', models.BooleanField(default=True, help_text='enable transition on screen width change.', verbose_name='enable transition')), ('tiles_space_between_cols', models.PositiveSmallIntegerField(default=3, verbose_name='space between images')), ('tiles_space_between_cols_mobile', models.PositiveSmallIntegerField(default=3, help_text='Space between cols for mobile type.', verbose_name='space between images (mobile)')), ('tiles_min_columns', models.PositiveSmallIntegerField(default=2, help_text='Maximum number of columns, for mobile size.', verbose_name='min columns')), ('tiles_set_initial_height', models.BooleanField(default=True, help_text='Columns type related only.', verbose_name='set initial height')), ('tiles_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Alignment of the tiles in the space.', max_length=6, verbose_name='align')), ('tiles_col_width', models.PositiveSmallIntegerField(default=250, help_text='Column width. 
Exact or base according the settings.', verbose_name='column width')), ('tiles_exact_width', models.BooleanField(default=False, help_text='Exact width of column. Disables the min and max columns.', verbose_name='exact width')), ('tiles_include_padding', models.BooleanField(default=True, help_text='Include padding at the sides of the columns, equal to current space between cols.', verbose_name='include padding')), ('tiles_max_columns', models.PositiveSmallIntegerField(default=0, help_text='Maximum number of columns (0 for unlimited).', verbose_name='maximum columns')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), 
('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), 
('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), ('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=False, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, help_text='Enable outline of the tile (works only 
together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). 
In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilescolumns_unite_plugins_full', related_query_name='tilescolumns_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Tiles - Columns plugin', 'verbose_name_plural': 'Tiles - Columns plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( name='TilesColumnsSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. 
If some number gived, like 0.', null=True, verbose_name='auto open')), ('theme_enable_preloader', models.BooleanField(default=True, help_text='Enable preloader circle.', verbose_name='enable preloader')), ('theme_preloading_height', models.PositiveSmallIntegerField(default=200, help_text='The height of the preloading div, it is shown before the gallery.', verbose_name='preloading height')), ('theme_preloader_vertpos', models.PositiveSmallIntegerField(default=100, help_text='The vertical position of the preloader.', verbose_name='preloader vertical position')), ('theme_appearance_order', models.CharField(choices=[('normal', 'Normal'), ('shuffle', 'Shuffle'), ('keep', 'Keep order')], default='normal', help_text='The appearance order of the tiles.', max_length=8, verbose_name='appearance order')), ('tiles_enable_transition', models.BooleanField(default=True, help_text='enable transition on screen width change.', verbose_name='enable transition')), ('tiles_space_between_cols', models.PositiveSmallIntegerField(default=3, verbose_name='space between images')), ('tiles_space_between_cols_mobile', models.PositiveSmallIntegerField(default=3, help_text='Space between cols for mobile type.', verbose_name='space between images (mobile)')), ('tiles_min_columns', models.PositiveSmallIntegerField(default=2, help_text='Maximum number of columns, for mobile size.', verbose_name='min columns')), ('tiles_set_initial_height', models.BooleanField(default=True, help_text='Columns type related only.', verbose_name='set initial height')), ('tiles_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='Alignment of the tiles in the space.', max_length=6, verbose_name='align')), ('tiles_col_width', models.PositiveSmallIntegerField(default=250, help_text='Column width. 
Exact or base according the settings.', verbose_name='column width')), ('tiles_exact_width', models.BooleanField(default=False, help_text='Exact width of column. Disables the min and max columns.', verbose_name='exact width')), ('tiles_include_padding', models.BooleanField(default=True, help_text='Include padding at the sides of the columns, equal to current space between cols.', verbose_name='include padding')), ('tiles_max_columns', models.PositiveSmallIntegerField(default=0, help_text='Maximum number of columns (0 for unlimited).', verbose_name='maximum columns')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), 
('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), 
('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), ('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=False, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, help_text='Enable outline of the tile (works only 
together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). 
In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Tiles - Columns unite options', 'verbose_name_plural': 'Tiles - Columns unite options', }, ), migrations.CreateModel( name='TilesGridPlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_tilesgridplugin', serialize=False, to='cms.CMSPlugin')), ('tile_height', models.PositiveSmallIntegerField(default=150, verbose_name='tile height')), ('tile_width', models.PositiveSmallIntegerField(default=180, verbose_name='tile height')), ('grid_padding', models.PositiveSmallIntegerField(default=10, verbose_name='tile height')), ('grid_space_between_cols', models.PositiveSmallIntegerField(default=20, verbose_name='space between columns')), ('grid_space_between_rows', models.PositiveSmallIntegerField(default=20, verbose_name='space between rows')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the 
gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. If some number gived, like 0.', null=True, verbose_name='auto open')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. 
If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), 
('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('theme_navigation_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='The align of the navigation.', max_length=6, verbose_name='navigation align')), ('theme_navigation_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='horizontal offset')), ('theme_space_between_arrows', models.PositiveSmallIntegerField(default=5, help_text='The space between arrows in the navigation.', verbose_name='space between arrows')), ('bullets_space_between', models.PositiveSmallIntegerField(default=12, verbose_name='space between bullets')), ('grid_num_rows', models.PositiveSmallIntegerField(default=3, help_text='Maximum number of grid rows. 
If set to big value, the navigation will not appear.', verbose_name='number of rows')), ('theme_arrows_margin_top', models.PositiveSmallIntegerField(default=20, help_text='The space between arrows and grid.', verbose_name='arrows top margin')), ('theme_bullets_color', models.CharField(choices=[('gray', 'Gray'), ('blue', 'Blue'), ('brown', 'Brown'), ('green', 'Green'), ('red', 'Red')], default='gray', max_length=6, verbose_name='bullet color')), ('theme_bullets_margin_top', models.PositiveSmallIntegerField(default=40, help_text='The space between bullets and grid.', verbose_name='bullets top margin')), ('theme_navigation_type', models.CharField(choices=[('bullets', 'Bullets'), ('arrows', 'Arrows')], default='bullets', help_text='The vertical position of the navigation reative to the carousel.', max_length=7, verbose_name='navigation type')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=2, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=True, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, 
help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, 
help_text='show link icon (if the tile has a link). In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilesgrid_unite_plugins_full', related_query_name='tilesgrid_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Tiles - Grid plugin', 'verbose_name_plural': 'Tiles - Grid plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( name='TilesGridSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tile_height', models.PositiveSmallIntegerField(default=150, verbose_name='tile height')), ('tile_width', models.PositiveSmallIntegerField(default=180, verbose_name='tile height')), ('grid_padding', models.PositiveSmallIntegerField(default=10, verbose_name='tile height')), ('grid_space_between_cols', models.PositiveSmallIntegerField(default=20, 
verbose_name='space between columns')), ('grid_space_between_rows', models.PositiveSmallIntegerField(default=20, verbose_name='space between rows')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. If some number gived, like 0.', null=True, verbose_name='auto open')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, 
help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, 
help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), ('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('theme_navigation_align', models.CharField(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='center', help_text='The align of the navigation.', max_length=6, verbose_name='navigation align')), ('theme_navigation_offset_hor', models.PositiveSmallIntegerField(default=0, verbose_name='horizontal offset')), ('theme_space_between_arrows', models.PositiveSmallIntegerField(default=5, help_text='The space between arrows in the navigation.', verbose_name='space between arrows')), ('bullets_space_between', models.PositiveSmallIntegerField(default=12, verbose_name='space between bullets')), ('grid_num_rows', models.PositiveSmallIntegerField(default=3, help_text='Maximum number of grid rows. 
If set to big value, the navigation will not appear.', verbose_name='number of rows')), ('theme_arrows_margin_top', models.PositiveSmallIntegerField(default=20, help_text='The space between arrows and grid.', verbose_name='arrows top margin')), ('theme_bullets_color', models.CharField(choices=[('gray', 'Gray'), ('blue', 'Blue'), ('brown', 'Brown'), ('green', 'Green'), ('red', 'Red')], default='gray', max_length=6, verbose_name='bullet color')), ('theme_bullets_margin_top', models.PositiveSmallIntegerField(default=40, help_text='The space between bullets and grid.', verbose_name='bullets top margin')), ('theme_navigation_type', models.CharField(choices=[('bullets', 'Bullets'), ('arrows', 'Arrows')], default='bullets', help_text='The vertical position of the navigation reative to the carousel.', max_length=7, verbose_name='navigation type')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=2, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=True, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, 
help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, 
help_text='show link icon (if the tile has a link). In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Tiles - Grid unite options', 'verbose_name_plural': 'Tiles - Grid unite options', }, ), migrations.CreateModel( name='TilesJustifiedPlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_tilesjustifiedplugin', serialize=False, to='cms.CMSPlugin')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. 
If some number gived, like 0.', null=True, verbose_name='auto open')), ('theme_enable_preloader', models.BooleanField(default=True, help_text='Enable preloader circle.', verbose_name='enable preloader')), ('theme_preloading_height', models.PositiveSmallIntegerField(default=200, help_text='The height of the preloading div, it is shown before the gallery.', verbose_name='preloading height')), ('theme_preloader_vertpos', models.PositiveSmallIntegerField(default=100, help_text='The vertical position of the preloader.', verbose_name='preloader vertical position')), ('tiles_enable_transition', models.BooleanField(default=True, help_text='enable transition on screen width change.', verbose_name='enable transition')), ('tiles_set_initial_height', models.BooleanField(default=True, help_text='Columns type related only.', verbose_name='set initial height')), ('tiles_justified_row_height', models.PositiveSmallIntegerField(default=150, help_text='Base row height of the justified type.', verbose_name='row height')), ('tiles_justified_space_between', models.PositiveSmallIntegerField(default=3, help_text='Space between the tiles justified type.', verbose_name='space between')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. 
for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), ('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, 
verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=False, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', 
verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). 
In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilesjustified_unite_plugins_full', related_query_name='tilesjustified_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Tiles - Justified plugin', 'verbose_name_plural': 'Tiles - Justified plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( name='TilesJustifiedSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. 
If some number gived, like 0.', null=True, verbose_name='auto open')), ('theme_enable_preloader', models.BooleanField(default=True, help_text='Enable preloader circle.', verbose_name='enable preloader')), ('theme_preloading_height', models.PositiveSmallIntegerField(default=200, help_text='The height of the preloading div, it is shown before the gallery.', verbose_name='preloading height')), ('theme_preloader_vertpos', models.PositiveSmallIntegerField(default=100, help_text='The vertical position of the preloader.', verbose_name='preloader vertical position')), ('tiles_enable_transition', models.BooleanField(default=True, help_text='enable transition on screen width change.', verbose_name='enable transition')), ('tiles_set_initial_height', models.BooleanField(default=True, help_text='Columns type related only.', verbose_name='set initial height')), ('tiles_justified_row_height', models.PositiveSmallIntegerField(default=150, help_text='Base row height of the justified type.', verbose_name='row height')), ('tiles_justified_space_between', models.PositiveSmallIntegerField(default=3, help_text='Space between the tiles justified type.', verbose_name='space between')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. 
for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), ('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, 
verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=False, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', 
verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). 
In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Tiles - Justified unite options', 'verbose_name_plural': 'Tiles - Justified unite options', }, ), migrations.CreateModel( name='TilesNestedPlugin', fields=[ ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_aoxomoxoa_tilesnestedplugin', serialize=False, to='cms.CMSPlugin')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. 
If some number gived, like 0.', null=True, verbose_name='auto open')), ('theme_enable_preloader', models.BooleanField(default=True, help_text='Enable preloader circle.', verbose_name='enable preloader')), ('theme_preloading_height', models.PositiveSmallIntegerField(default=200, help_text='The height of the preloading div, it is shown before the gallery.', verbose_name='preloading height')), ('theme_preloader_vertpos', models.PositiveSmallIntegerField(default=100, help_text='The vertical position of the preloader.', verbose_name='preloader vertical position')), ('tiles_enable_transition', models.BooleanField(default=True, help_text='enable transition on screen width change.', verbose_name='enable transition')), ('tiles_space_between_cols', models.PositiveSmallIntegerField(default=3, verbose_name='space between images')), ('tiles_space_between_cols_mobile', models.PositiveSmallIntegerField(default=3, help_text='Space between cols for mobile type.', verbose_name='space between images (mobile)')), ('tiles_min_columns', models.PositiveSmallIntegerField(default=2, help_text='Maximum number of columns, for mobile size.', verbose_name='min columns')), ('tiles_nested_optimal_tile_width', models.PositiveSmallIntegerField(default=250, verbose_name='optimal_tile_width')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. 
for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), ('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, 
verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=False, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', 
verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). 
In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('slug', models.SlugField(verbose_name='slug')), ('attributes', djangocms_attributes_field.fields.AttributesField(blank=True, default=dict, verbose_name='Attributes')), ('full_thumbnail_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilesnested_unite_plugins_full', related_query_name='tilesnested_unite_plugin_full', to='filer.ThumbnailOption', verbose_name='fullscreen thumbnail option')), ], options={ 'verbose_name': 'Tiles - Nested plugin', 'verbose_name_plural': 'Tiles - Nested plugins', }, bases=('cms.cmsplugin', models.Model), ), migrations.CreateModel( name='TilesNestedSavedUniteOptions', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('theme_gallery_padding', models.PositiveSmallIntegerField(default=0, help_text='The horizontal padding of the gallery from the sides.', verbose_name='gallery padding')), ('theme_auto_open', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Auto open lightbox at start. 
If some number gived, like 0.', null=True, verbose_name='auto open')), ('theme_enable_preloader', models.BooleanField(default=True, help_text='Enable preloader circle.', verbose_name='enable preloader')), ('theme_preloading_height', models.PositiveSmallIntegerField(default=200, help_text='The height of the preloading div, it is shown before the gallery.', verbose_name='preloading height')), ('theme_preloader_vertpos', models.PositiveSmallIntegerField(default=100, help_text='The vertical position of the preloader.', verbose_name='preloader vertical position')), ('tiles_enable_transition', models.BooleanField(default=True, help_text='enable transition on screen width change.', verbose_name='enable transition')), ('tiles_space_between_cols', models.PositiveSmallIntegerField(default=3, verbose_name='space between images')), ('tiles_space_between_cols_mobile', models.PositiveSmallIntegerField(default=3, help_text='Space between cols for mobile type.', verbose_name='space between images (mobile)')), ('tiles_min_columns', models.PositiveSmallIntegerField(default=2, help_text='Maximum number of columns, for mobile size.', verbose_name='min columns')), ('tiles_nested_optimal_tile_width', models.PositiveSmallIntegerField(default=250, verbose_name='optimal_tile_width')), ('gallery_background_color', colorfield.fields.ColorField(blank=True, default='', help_text='set custom background color. 
If not set it will be taken from css.', max_length=18, verbose_name='background color')), ('gallery_min_width', models.PositiveSmallIntegerField(default=150, help_text='Gallery minimum width when resizing.', verbose_name='minimum width')), ('gallery_width', models.CharField(default='100%', help_text='Gallery width.', max_length=8, verbose_name='width')), ('lightbox_type', models.CharField(choices=[('compact', 'Compact'), ('wide', 'Wide')], default='wide', max_length=7, verbose_name='lightbox type')), ('lightbox_hide_arrows_onvideoplay', models.BooleanField(default=True, help_text='Hide the arrows when a video starts playing and show them when it stops.', verbose_name='hide arrows during video play')), ('lightbox_arrows_position', models.CharField(choices=[('sides', 'Sides'), ('inside', 'Inside')], default='sides', help_text='Position of the arrows, used on compact type.', max_length=6, verbose_name='lightbox type')), ('lightbox_arrows_offset', models.PositiveSmallIntegerField(default=10, help_text='The horizontal offset of the arrows.', verbose_name='arrows offset')), ('lightbox_arrows_inside_offset', models.PositiveSmallIntegerField(default=10, help_text='The offset from the image border if the arrows are placed inside.', verbose_name='arrows inside offset')), ('lightbox_arrows_inside_alwayson', models.BooleanField(default=False, help_text='Show the arrows on mouseover, or always on.', verbose_name='arrows always on')), ('lightbox_overlay_color', colorfield.fields.ColorField(blank=True, default='', help_text='The color of the overlay. If null - will take from CSS.', max_length=18, verbose_name='overlay color')), ('lightbox_overlay_opacity', models.PositiveSmallIntegerField(default=100, help_text='The opacity of the overlay. 
for compact type 60%.', validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('lightbox_top_panel_opacity', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The opacity of the top panel.', null=True, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='top panel opacity (%)')), ('lightbox_close_on_emptyspace', models.BooleanField(default=False, verbose_name='close on empty space')), ('lightbox_show_numbers', models.BooleanField(default=True, help_text='Show numbers on the right side.', verbose_name='show numbers')), ('lightbox_numbers_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The size of the numbers string.', null=True, verbose_name='numbers size')), ('lightbox_numbers_color', colorfield.fields.ColorField(blank=True, default='', max_length=18, verbose_name='numbers color')), ('lightbox_numbers_padding_top', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The top padding of the numbers (used in compact mode).', null=True, verbose_name='numbers top padding')), ('lightbox_numbers_padding_right', models.PositiveSmallIntegerField(blank=True, default=None, help_text='The right padding of the numbers (used in compact mode).', null=True, verbose_name='numbers right padding')), ('lightbox_slider_image_border', models.BooleanField(default=True, help_text='Enable border around the image (for compact type only).', verbose_name='slider image border')), ('lightbox_slider_image_border_width', models.PositiveSmallIntegerField(default=10, help_text='Image border width.', verbose_name='image border width')), ('lightbox_slider_image_border_color', colorfield.fields.ColorField(default='#FFFFFF', max_length=18, verbose_name='image border color')), ('lightbox_slider_image_border_radius', models.PositiveSmallIntegerField(default=0, verbose_name='image border radius')), ('lightbox_slider_image_shadow', models.BooleanField(default=True, 
verbose_name='slider image shadow')), ('lightbox_slider_control_swipe', models.BooleanField(default=True, help_text='Enable swiping control.', verbose_name='slider control swipe')), ('lightbox_slider_control_zoom', models.BooleanField(default=True, help_text='Enable zooming control.', verbose_name='slider control zoom')), ('tile_as_link', models.BooleanField(default=False, help_text='Make the tile act the tile as a link, no lightbox will appear.', verbose_name='act as link')), ('tile_border_color', colorfield.fields.ColorField(default='#F0F0F0', max_length=18, verbose_name='border color')), ('tile_border_radius', models.PositiveSmallIntegerField(default=0, help_text='Tile border radius (applied to border only, not to outline).', verbose_name='border radius')), ('tile_border_width', models.PositiveSmallIntegerField(default=3, help_text='Tile border width.', verbose_name='border width')), ('tile_enable_action', models.BooleanField(default=True, help_text='Enable tile action on click like lightbox.', verbose_name='enable action')), ('tile_enable_border', models.BooleanField(default=False, help_text='Enable border of the tile.', verbose_name='enable border')), ('tile_enable_icons', models.BooleanField(default=True, help_text='Enable icons in mouseover mode.', verbose_name='enable icons')), ('tile_enable_image_effect', models.BooleanField(default=False, verbose_name='enable image effect')), ('tile_enable_outline', models.BooleanField(default=False, help_text='Enable outline of the tile (works only together with the border).', verbose_name='enable outline')), ('tile_enable_overlay', models.BooleanField(default=True, help_text='Enable tile color overlay (on mouseover).', verbose_name='enable overlay')), ('tile_enable_shadow', models.BooleanField(default=True, help_text='Enable shadow of the tile.', verbose_name='enable shadow')), ('tile_image_effect_reverse', models.BooleanField(default=False, help_text='Reverce the image, set only on mouseover state.', 
verbose_name='enable effect reverse')), ('tile_image_effect_type', models.CharField(choices=[('bw', 'Black and white'), ('blur', 'Blur'), ('sepia', 'Sepia')], default='bw', max_length=16, verbose_name='image effect type')), ('tile_link_newpage', models.BooleanField(default=True, help_text='Open the tile link in new page.', verbose_name='link newpage')), ('tile_outline_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='outline color')), ('tile_overlay_color', colorfield.fields.ColorField(default='#000000', help_text='Tile overlay color.', max_length=18, verbose_name='overlay color')), ('tile_overlay_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='overlay opacity (%)')), ('tile_shadow_blur', models.PositiveSmallIntegerField(default=3, help_text='Shadow blur.', verbose_name='shadow blur')), ('tile_shadow_color', colorfield.fields.ColorField(default='#8B8B8B', max_length=18, verbose_name='shadow color')), ('tile_shadow_h', models.PositiveSmallIntegerField(default=1, help_text='Position of horizontal shadow.', verbose_name='shadow horizontal offset')), ('tile_shadow_spread', models.PositiveSmallIntegerField(default=2, verbose_name='shadow spread')), ('tile_shadow_v', models.PositiveSmallIntegerField(default=1, help_text='Position of vertical shadow.', verbose_name='shadow vertical offset')), ('tile_show_link_icon', models.BooleanField(default=False, help_text='show link icon (if the tile has a link). 
In case of tile_as_link this option not enabled.', verbose_name='show link icon')), ('tile_space_between_icons', models.PositiveSmallIntegerField(default=26, help_text='Initial space between icons, (on small tiles it may change).', verbose_name='space between icons')), ('tile_enable_textpanel', models.BooleanField(default=False, verbose_name='enable')), ('tile_textpanel_always_on', models.BooleanField(default=False, verbose_name='always on')), ('tile_textpanel_appear_type', models.CharField(choices=[('slide', 'Slide'), ('fade', 'Fade')], default='slide', help_text='Appear type of the textpanel on mouseover.', max_length=5, verbose_name='appear type')), ('tile_textpanel_bg_color', colorfield.fields.ColorField(default='#000000', max_length=18, verbose_name='background color')), ('tile_textpanel_bg_css', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel background CSS.', verbose_name='background CSS')), ('tile_textpanel_bg_opacity', models.PositiveSmallIntegerField(default=40, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='background opacity (%)')), ('tile_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Description additional CSS.', verbose_name='description CSS')), ('tile_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Title additional CSS.', verbose_name='description CSS')), ('tile_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('tile_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from css.', max_length=18, verbose_name='description color')), ('tile_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('tile_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('tile_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('tile_textpanel_offset', models.PositiveSmallIntegerField(default=0, verbose_name='vertical offset')), ('tile_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=8, verbose_name='bottom padding')), ('tile_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('tile_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('tile_textpanel_padding_top', models.PositiveSmallIntegerField(default=8, verbose_name='top padding')), ('tile_textpanel_position', models.CharField(choices=[('inside_bottom', 'Inside bottom'), ('inside_top', 'Inside top'), ('inside_center', 'Inside center'), ('top', 'Top'), ('bottom', 'Bottom')], default='bottom', help_text='The position of the textpanel.', max_length=13, verbose_name='text panel position')), ('tile_textpanel_source', models.CharField(choices=[('title', 'Title'), ('desc', 'Description'), ('desc_title', 'Description or title')], default='title', help_text='Source of the textpanel.', max_length=10, verbose_name='text panel source')), ('tile_textpanel_title_bold', 
models.NullBooleanField(default=None, verbose_name='bold title')), ('tile_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. If blank take from CSS.', max_length=18, verbose_name='title color')), ('tile_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='title font family')), ('tile_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from CSS.', null=True, verbose_name='title font size (px)')), ('tile_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='title text alignment')), ('lightbox_show_textpanel', models.BooleanField(default=True, help_text='Show the text panel.', verbose_name='show text panel')), ('lightbox_textpanel_css_description', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the description.', verbose_name='description CSS')), ('lightbox_textpanel_css_title', djangocms_attributes_field.fields.AttributesField(blank=True, default={}, help_text='Textpanel additional CSS of the title.', verbose_name='title CSS')), ('lightbox_textpanel_desc_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_desc_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel description text color. 
If blank take from CSS.', max_length=18, verbose_name='description color')), ('lightbox_textpanel_desc_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the description.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_desc_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel description font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_desc_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel description text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_enable_description', models.BooleanField(default=False, help_text='Enable the description text.', verbose_name='enable description')), ('lightbox_textpanel_enable_title', models.BooleanField(default=True, help_text='Enable the title text.', verbose_name='enable title')), ('lightbox_textpanel_padding_bottom', models.PositiveSmallIntegerField(default=5, help_text='Textpanel bottom padding.', verbose_name='bottom padding')), ('lightbox_textpanel_padding_left', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from left.', verbose_name='left padding')), ('lightbox_textpanel_padding_right', models.PositiveSmallIntegerField(default=11, help_text='Cut some space for text from right.', verbose_name='right padding')), ('lightbox_textpanel_padding_top', models.PositiveSmallIntegerField(default=5, help_text='Textpanel top padding.', verbose_name='top padding')), ('lightbox_textpanel_title_bold', models.NullBooleanField(default=None, verbose_name='Bold description')), ('lightbox_textpanel_title_color', colorfield.fields.ColorField(blank=True, default='', help_text='Textpanel title color. 
If blank take from CSS.', max_length=18, verbose_name='title color')), ('lightbox_textpanel_title_font_family', models.CharField(blank=True, default='', help_text='A CSS font family for the title.', max_length=255, verbose_name='description font family')), ('lightbox_textpanel_title_font_size', models.PositiveSmallIntegerField(blank=True, default=None, help_text='Textpanel title font size. If blank take from css.', null=True, verbose_name='description font size (px)')), ('lightbox_textpanel_title_text_align', models.CharField(blank=True, choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')], default='', help_text='Textpanel title text alignment. If blank take from CSS.', max_length=6, verbose_name='description text alignment')), ('lightbox_textpanel_width', models.PositiveSmallIntegerField(default=550, help_text='the width of the text panel. wide type only.', verbose_name='width')), ('name', models.CharField(max_length=255, verbose_name='name')), ], options={ 'verbose_name': 'Tiles - Nested unite options', 'verbose_name_plural': 'Tiles - Nested unite options', }, ), migrations.AddField( model_name='tilesnestedplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.TilesNestedSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='tilesnestedplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilesnested_unite_plugins_thumbnail', related_query_name='tilesnested_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='tilesjustifiedplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options 
with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.TilesJustifiedSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='tilesjustifiedplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilesjustified_unite_plugins_thumbnail', related_query_name='tilesjustified_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='tilesgridplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.TilesGridSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='tilesgridplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilesgrid_unite_plugins_thumbnail', related_query_name='tilesgrid_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='tilescolumnsplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.TilesColumnsSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='tilescolumnsplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tilescolumns_unite_plugins_thumbnail', 
related_query_name='tilescolumns_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='sliderplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.SliderSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='sliderplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='slider_unite_plugins_thumbnail', related_query_name='slider_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='gridthemeplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.GridThemeSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='gridthemeplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gridtheme_unite_plugins_thumbnail', related_query_name='gridtheme_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='defaultthemeplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.DefaultThemeSavedUniteOptions', verbose_name='saved 
configuration'), ), migrations.AddField( model_name='defaultthemeplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='defaulttheme_unite_plugins_thumbnail', related_query_name='defaulttheme_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='compactthemeplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.CompactThemeSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='compactthemeplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='compacttheme_unite_plugins_thumbnail', related_query_name='compacttheme_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), migrations.AddField( model_name='carouselplugin', name='saved_conf', field=models.ForeignKey(blank=True, help_text='Override the unite options with the values from the selected saved configuration.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instances', related_query_name='instance', to='djangocms_aoxomoxoa.CarouselSavedUniteOptions', verbose_name='saved configuration'), ), migrations.AddField( model_name='carouselplugin', name='thumbnail_thumbnail_option', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_unite_plugins_thumbnail', related_query_name='carousel_unite_plugin_thumbnail', to='filer.ThumbnailOption', verbose_name='thumbnail thumbnail option'), ), ]
/ripiu.djangocms_aoxomoxoa-0.0.4.tar.gz/ripiu.djangocms_aoxomoxoa-0.0.4/ripiu/djangocms_aoxomoxoa/migrations/0001_initial.py
0.660939
0.210868
0001_initial.py
pypi
# riposte [![Build Status](https://travis-ci.org/fwkz/riposte.svg?branch=master)](https://travis-ci.org/fwkz/riposte) [![License](https://img.shields.io/pypi/l/riposte.svg)](https://github.com/fwkz/riposte/blob/master/LICENSE) [![Version](https://img.shields.io/pypi/v/riposte.svg)](https://pypi.org/project/riposte/) [![Python](https://img.shields.io/pypi/pyversions/riposte.svg)](https://pypi.org/project/riposte/) [![Code Style](https://img.shields.io/badge/codestyle-black-black.svg)](https://github.com/ambv/black) _Riposte_ allows you to easily wrap your application inside a tailored interactive shell. Common chores regarding building REPLs were factored out and are taken care of so you can really focus on the specific domain logic of your application. The motivation for building _Riposte_ comes from many sleepless nights of handling numerous tricky cases regarding REPLs during [routersploit](https://github.com/threat9/routersploit) development. Like every other project it began very innocently but after a while, when the project got some real traction and the code base was rapidly growing, shell logic started to intertwine with domain logic making things less and less readable and contributor friendly. Moreover, to our surprise, people started to fork [routersploit](https://github.com/threat9/routersploit) not because they were interested in the security of embedded devices but simply because they wanted to leverage our interactive shell logic and build their own tools using a similar concept. All these years they must have said: _"There must be a better way!"_ and they were completely right, the better way is called _Riposte_. 
## Table of contents * [Getting started](#getting-started) * [Installing](#installing) * [Example usage](#example-usage) * [Manual](#manual) * [Command](#command) * [Completer](#completer) * [Guides](#guides) * [Printing](#printing) * [History](#history) * [Prompt](#Prompt) * [Project status](#project-status) * [Contributing](#contributing) * [Versioning](#versioning) * [License](#license) * [Acknowledgments](#acknowledgments) ## Getting Started ### Installing The package is available on PyPI so please use [pip](https://pip.pypa.io/en/stable/quickstart/) to install it: ```bash pip install riposte ``` Riposte supports Python 3.6 and newer. ### Example usage ```python from riposte import Riposte calculator = Riposte(prompt="calc:~$ ") MEMORY = [] @calculator.command("add") def add(x: int, y: int): result = f"{x} + {y} = {x + y}" MEMORY.append(result) calculator.success(result) @calculator.command("multiply") def multiply(x: int, y: int): result = f"{x} * {y} = {x * y}" MEMORY.append(result) calculator.success(result) @calculator.command("memory") def memory(): for entry in MEMORY: calculator.print(entry) calculator.run() ``` ```bash calc:~$ add 2 2 [+] 2 + 2 = 4 calc:~$ multiply 3 3 [+] 3 * 3 = 9 calc:~$ memory 2 + 2 = 4 3 * 3 = 9 calc:~$ ``` ## Manual ### Command First and foremost you want to register some commands to make your REPL actionable. Adding a command and binding it to a handling function is possible through the `Riposte.command` decorator. ```python from riposte import Riposte repl = Riposte() @repl.command("hello") def hello(): repl.success("Is it me you looking for?") repl.run() ``` ```bash riposte:~ $ hello [+] Is it me you looking for? ``` Additionally `Riposte.command` accepts a few optional parameters: * `description` a few words describing the command which you can later use to build meaningful help * [`guides`](#guides) definition of how to interpret passed arguments ### Completer `Riposte` comes with support for tab-completion for commands. 
You can register completer function in a similar way you registering commands, just use `Riposte.complete` decorator and point it to a specific command. ```python from riposte import Riposte repl = Riposte() START_SUBCOMMANDS = ["foo", "bar"] @repl.command("start") def start(subcommand: str): if subcommand in START_SUBCOMMANDS: repl.status(f"{subcommand} started") else: repl.error("Unknown subcommand.") @repl.complete("start") def start_completer(text, line, start_index, end_index): return [ subcommand for subcommand in START_SUBCOMMANDS if subcommand.startswith(text) ] repl.run() ``` Completer function is triggered by the TAB key. Every completer function should return list of valid options and should accept the following parameters: * `text` last word in the line * `line` content of the whole line * `start_index` starting index of the last word in the line * `end_index` ending index of the last word in the line So in the case of our example: `riposte:~ $ start ba<TAB>` ``` text -> "ba" line -> "start ba" start_index -> 6 end_index -> 8 ``` Equipped with this information you can build your custom completer functions for every command. ### Guides Guides is a way of saying how [command](#command) should interpret arguments passed by the user via prompt. `Riposte` rely on [type-hints](https://docs.python.org/3/library/typing.html) in order to do that. ```python from riposte import Riposte repl = Riposte() @repl.command("guideme") def guideme(x: int, y: str): repl.print("x:", x, type(x)) repl.print("y:", y, type(y)) repl.run() ``` ```bash riposte:~ $ guideme 1 1 x: 1 <class 'int'> y: 1 <class 'str'> ``` In both cases we've passed value _1_ as `x` and `y`. Based on parameter's type-hint passed arguments was interpreted as `int` in case of `x` and as `str` in case of `y`. You can also use this technique for different types. 
```python from riposte import Riposte repl = Riposte() @repl.command("guideme") def guideme(x: dict, y: list): x["foo"] = "bar" repl.print("x:", x, type(x)) y.append("foobar") repl.print("y:", y, type(y)) repl.run() ``` ```bash riposte:~ $ guideme "{'bar': 'baz'}" "['barbaz']" x: {'bar': 'baz', 'foo': 'bar'} <class 'dict'> y: ['barbaz', 'foobar'] <class 'list'> ``` Another more powerful way of defining guides for handling function parameters is defining it straight from`Riposte.command` decorator. In this case guide defined this way take precedence over the type hints. ```python from riposte import Riposte repl = Riposte() @repl.command("guideme", guides={"x": [int]}) def guideme(x): repl.print("x:", x, type(x)) repl.run() ``` ```bash riposte:~ $ guideme 1 x: 1 <class 'int'> ``` Why it is more powerful? Because this way you can chain different guides, where output of one guide is input for another, creating validation or cast input into more complex types. ```python from collections import namedtuple from riposte import Riposte from riposte.exceptions import RiposteException from riposte.guides import literal repl = Riposte() def non_negative(value: int): if value < 0: raise RiposteException("Value can't be negative") return value Point = namedtuple("Point", ("x", "y")) def get_point(value: dict): return Point(**value) @repl.command("guideme", guides={"x": [int, non_negative], "y": [literal, get_point]}) def guideme(x, y): repl.print("x:", x, type(x)) repl.print("y:", y, type(y)) repl.run() ``` ```bash riposte:~ $ guideme -1 '{"x": 1, "y": 2}' [-] Value can't be negative riposte:~ $ guideme 1 '{"x": 1, "y": 2}' x: 1 <class 'int'> y: Point(x=1, y=2) <class '__main__.Point'> riposte:~ $ ``` Under the hood, it is a simple function call where the input string is passed to first guide function in the chain. 
In this case, the call looks like this: ```python non_negative(int("-1")) # guide chain for parameter `x` get_point(literal('{"x": 1, "y": 2}')) # guide chain for parameter `y` ``` ### Printing _Riposte_ comes with built-in thread safe printing methods: * `print` * `info` * `error` * `status` * `success` Every method follows the signature of Python's built-in [`print()`](https://docs.python.org/3/library/functions.html#print) function. Besides `print` all of them provide informative coloring corresponding to its name. We strongly encourage to stick to our thread safe printing API but if you are feeling frisky, know what you are doing and you are 100% sure, that threaded execution is something that will never come up at some point in the lifecycle of you application feel free to use Python's built-in [`print()`](https://docs.python.org/3/library/functions.html#print) function. #### Extending `PrinterMixin` If you want to change the styling of existing methods or add custom one, please extend `PrinterMixin` class. ```python from riposte import Riposte from riposte.printer.mixins import PrinterMixin class ExtendedPrinterMixin(PrinterMixin): def success(self, *args, **kwargs): # overwriting existing method self.print(*args, **kwargs) def shout(self, *args, **kwargs): # adding new one self.print((*args, "!!!"), **kwargs) class CustomRiposte(Riposte, ExtendedPrinterMixin): pass repl = CustomRiposte() @repl.command("foobar") def foobar(message: str): repl.shout(message) ``` #### Customizing `PrinterMixin` Not happy about existing printing API? No problem, you can also build your own from scratch using `PrinterBaseMixin` and its thread safe `_print` method. 
```python from riposte import Riposte from riposte.printer.mixins import PrinterBaseMixin class CustomPrinterMixin(PrinterBaseMixin): def ask(self, *args, **kwargs): # adding new one self._print((*args, "???"), **kwargs) def shout(self, *args, **kwargs): # adding new one self._print((*args, "!!!"), **kwargs) class CustomRiposte(Riposte, CustomPrinterMixin): pass repl = CustomRiposte() @repl.command("foobar") def foobar(message: str): repl.shout(message) repl.ask(message) repl.success(message) # It'll raise exception as it's no longer available ``` #### Coloring output with `Pallete` If you feel like adding a few colors to the output you can always use `Pallete`. ```python from riposte import Riposte from riposte.printer import Palette repl = Riposte() @repl.command("foo") def foo(msg: str): repl.print(Palette.GREEN.format(msg)) # It will be green ``` `Pallete` goes with the following output formattings: * `GREY` * `RED` * `GREEN` * `YELLOW` * `BLUE` * `MAGENTA` * `CYAN` * `WHITE` * `BOLD` ### History Command history is stored in your HOME directory in `.riposte` file. The default length is 100 lines. Both settings can be changed using `history_file` and `history_length` parameters. ```python from pathlib import Path from riposte import Riposte repl = Riposte( history_file=Path.home() / ".custom_history_file", history_length=500, ) ``` ### Prompt The default prompt is `riposte:~ $ ` but you can easily customize it: ```python from riposte import Riposte repl = Riposte(prompt="custom-prompt >>> ") repl.run() ``` You can also dynamically resolve prompt layout based on the state of some object simply by overwriting `Riposte.prompt` property. In the following example, we'll determine prompt based on `MODULE` value: ```python from riposte import Riposte MODULE = None class CustomRiposte(Riposte): @property def prompt(self): if MODULE: return f"foo:{MODULE} > " else: return self._prompt # reference to `prompt` parameter. 
repl = CustomRiposte(prompt="foo > ") @repl.command("set") def set_module(module_name: str): global MODULE MODULE = module_name repl.success("Module has been set.") @repl.command("unset") def unset_module(): global MODULE MODULE = None repl.success("Module has been unset.") repl.run() ``` ```bash foo > set bar [+] Module has been set. foo:bar > unset [+] Module has been unset. foo > ``` ## Project status _Riposte_ is under development. It might be considered to be in alpha phase. There might be some breaking changes in the future although a lot of concepts present here was already battle-tested during [routersploit](https://github.com/threat9/routersploit) development. ## Contributing Please read [CONTRIBUTING.md]() for details on our code of conduct, and the process for submitting pull requests to us. ## Versioning Project uses [SemVer](http://semver.org/) versioning. For the versions available, see the [releases](https://github.com/fwkz/riposte/releases). ## License _Riposte_ is licensed under the MIT License - see the [LICENSE](https://github.com/fwkz/riposte/blob/master/LICENSE) file for details ## Acknowledgments * [routersploit](https://github.com/threat9/routersploit) * [click](https://click.palletsprojects.com/)
/riposte-0.2.2.tar.gz/riposte-0.2.2/README.md
0.466846
0.963022
README.md
pypi
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from ripozo.exceptions import NotFoundException from ripozo.manager_base import BaseManager from ripozo.utilities import make_json_safe from ripozo import fields from cassandra.cqlengine.query import DoesNotExist, Token import logging import six _LOGGER = logging.getLogger(__name__) _COLUMN_FIELD_MAP = { 'ascii': fields.StringField, 'inet': fields.StringField, 'text': fields.StringField, 'varchar': fields.StringField, 'timeuuid': fields.StringField, 'uuid': fields.StringField, 'bigint': fields.IntegerField, 'counter': fields.IntegerField, 'int': fields.IntegerField, 'varint': fields.IntegerField, 'boolean': fields.BooleanField, 'double': fields.FloatField, 'float': fields.FloatField, 'decimal': fields.FloatField, 'map': fields.DictField, 'list': fields.ListField, 'set': fields.ListField } class CQLManager(BaseManager): """ Works with serializing the models as json and deserializing them to cqlengine models :param cassandra.cqlengine.models.Model model: """ fail_create_if_exists = True allow_filtering = False @classmethod def get_field_type(cls, name): col = cls.model._columns[name] db_type = col.db_type if db_type in _COLUMN_FIELD_MAP: field_class = _COLUMN_FIELD_MAP[db_type] return field_class(name) return fields.BaseField(name) @property def queryset(self): return self.model.objects.all() def create(self, values, *args, **kwargs): """ Creates an object using the specified values in the dict :param values: A dictionary with the attribute names as keys and the attribute values as values :type values: dict :return: Cassandra model object :rtype: cqlengine.Model """ _LOGGER.info('Creating model of type %s', self.model.__name__) values = self.valid_fields(values, self.create_fields) if self.fail_create_if_exists: obj = self.model.if_not_exists().create(**values) else: obj = self.model.create(**values) return 
self.serialize_model(obj) def retrieve(self, lookup_keys, *args, **kwargs): """ Retrieves an existing object using the lookupkeys :param lookup_keys: A dictionary with the attribute names as keys and the attribute values as values :type lookup_keys: dict :return: The specified model using the lookup keys :rtype: dict """ _LOGGER.info('Retrieving model of type %s', self.model.__name__) obj = self._get_model(lookup_keys) return self.serialize_model(obj) def retrieve_list(self, filters, *args, **kwargs): """ Retrieves a list of all models that match the specified filters :param filters: The named parameters to filter the models on :type filters: dict :return: tuple 0 index = a list of the models as dictionary objects 1 index = the query args for retrieving the next in pagination :rtype: list """ logger = logging.getLogger(__name__) logger.info('Retrieving list of models of type %s with ' 'filters: %s', str(self.model), filters) obj_list = [] models = self.queryset if self.allow_filtering: logger.debug('Allowing filtering on list retrieval') models = models.allow_filtering() pagination_count, filters = self.get_pagination_count(filters) last_pagination_pk, filters = self.get_pagination_pks(filters) if not last_pagination_pk: last_pagination_pk = [] if filters is not None: for key, value in six.iteritems(filters): models = models.filter(getattr(self.model, key) == value) if self.order_by is not None: models = models.order_by(self.order_by) models = self.pagination_filtration(models, last_pagination_pk=last_pagination_pk, filters=filters) models = models.limit(pagination_count + 1) last_model = None # Handle the extra model used for finding the next batch if len(models) > pagination_count: last_model = models[-1] models = models[:pagination_count] for obj in models: obj_list.append(self.serialize_model(obj)) if not pagination_count or not last_model: return obj_list, {self.pagination_pk_query_arg: None, self.pagination_count_query_arg: pagination_count, 
self.pagination_next: None} else: query_args, pagination_keys = self.get_next_query_args(last_model, pagination_count, filters=filters) return obj_list, {self.pagination_pk_query_arg: pagination_keys, self.pagination_count_query_arg: pagination_count, self.pagination_next: query_args} def update(self, lookup_keys, updates, *args, **kwargs): """ Updates the model specified by the lookup_key with the specified updates :param lookup_keys: :type lookup_keys: dict :param updates: :type updates: dict :return: :rtype: cqlengine.Model """ _LOGGER.info('Updating model of type %s', self.model.__name__) obj = self._get_model(lookup_keys) updates = self.valid_fields(updates, self.update_fields) for key, value in six.iteritems(updates): setattr(obj, key, value) obj.save() return self.serialize_model(obj) def delete(self, lookup_keys, *args, **kwargs): """ Deletes the model specified by the lookup_keys :param lookup_keys: A dictionary of fields and values on model to filter by :type lookup_keys: dict """ _LOGGER.info('Deleting model of type %s', self.model.__name__) obj = self._get_model(lookup_keys) obj.delete() return {} def _get_model(self, lookup_keys): """ Gets the model specified by the lookupkeys :param lookup_keys: A dictionary of fields and values on the model to filter by :type lookup_keys: dict """ queryset = self.queryset for key, value in six.iteritems(lookup_keys): queryset = queryset.filter(getattr(self.model, key) == value) try: obj = queryset.get() return obj except DoesNotExist: raise NotFoundException('The model {0} could not be found. 
' 'lookup_keys: {1}'.format(self.model.__name__, lookup_keys)) def get_next_query_args(self, last_model, pagination_count, filters=None): filters = filters or {} if last_model is None: return None, None query_args = '{0}={1}'.format(self.pagination_count_query_arg, pagination_count) for filter_name, filter_value in six.iteritems(filters): query_args = '{0}&{1}={2}'.format(query_args, filter_name, filter_value) pagination_keys = [] for p_name in last_model._primary_keys: value = getattr(last_model, p_name) query_args = '{0}&{1}={2}'.format(query_args, self.pagination_pk_query_arg, value) pagination_keys.append(value) return query_args, pagination_keys def pagination_filtration(self, queryset, last_pagination_pk=None, filters=None): if filters is None: return queryset if last_pagination_pk is None: last_pagination_pk = [] if len(last_pagination_pk) == 0: return queryset partition_key_count = len(self.model._partition_keys) if len(dict(filters.items() + self.model._partition_keys.items())) < len(filters) + len(self.model._partition_keys): # There is some overlap between the partition keys filters # TODO make a better way to do filtering for i in range(len(self.model._partition_keys)): key = self.model._partition_keys.items()[i][0] if key in filters: continue value = last_pagination_pk[i] queryset = queryset.filter(**{'{0}__gte'.format(key): value}) else: queryset = queryset.filter(pk__token__gte=Token(last_pagination_pk)) if len(self.model._primary_keys) <= partition_key_count: return queryset clustering_pagination = last_pagination_pk[partition_key_count:] for i in range(len(clustering_pagination)): key = self.model._clustering_keys.items()[i][0] if key in filters: continue value = clustering_pagination[i] queryset = queryset.filter(getattr(self.model, key) >= value) return queryset def serialize_model(self, obj, fields_list=None): """ Takes a cqlengine.Model and jsonifies it. This got much easier recently. 
It also, makes the dictionary safe to immediately call json.dumps on it. :param obj: The model instance to jsonify :type obj: cqlengine.Model :return: python dictionary with field names and values :rtype: dict """ fields_list = fields_list or self.fields base = dict(obj) base = self.valid_fields(base, fields_list) return make_json_safe(base)
/ripozo-cassandra-0.2.1.tar.gz/ripozo-cassandra-0.2.1/ripozo_cassandra/cqlmanager.py
0.838184
0.214619
cqlmanager.py
pypi
# How to turn your database into a ReSTful API in under 10 lines of code A package to turn your database into a ReSTful API in just 10 lines of code. You can either install the package and simply start the API from the command line or you can follow this tutorial to build it yourself. ## Installation ```bash pip install ripozo-oasis ``` ## Running your API To turn your database into a ReSTful API simply run the following command. ```bash auto-api "mysql://localhost:3306/mydatabase" ``` You will need to pass a Database URI according to the [SQLALchemy Engine Configuration documentation](http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html). The format is `'dialect+driver://username:password@host:port/database_name'`. The driver is optional and only necessary if you do not wish to use the default. If you get import errors you will need to install a specific driver for your database. For example, with MySQL you'll need to run ``pip install mysql-python`` or for postgresql you'll need to run ``pip install psycopg2``. Now we can curl the base to get all available endpoints ```bash curl -X OPTIONS http://localhost:5000/ ``` We may see something like this assuming we had two tables, groups and users. 
```javascript { "_embedded": {}, "_links": { "group": { "href": "/group/" }, "user": { "href": "/user/" }, "self": { "href": "http://localhost:5000/" } } } ``` We could additionally vary the accept type to get a SIREN formatted response ```bash curl -X OPTIONS -H "Accept: application/vnd.siren+json" http://localhost:5000/ ``` ```javascript { "entities": [], "class": [ "" ], "links": [ { "href": "http://localhost:5000/", "rel": [ "self" ] }, { "href": "http://localhost:5000/group/", "rel": [ "group_list" ] }, { "href": "http://localhost:5000/group/<id>/", "rel": [ "group" ] }, { "href": "http://localhost:5000/user/", "rel": [ "user_list" ] }, { "href": "http://localhost:5000/user/<id>/", "rel": [ "user" ] } ], "actions": [ { "fields": [], "href": "http://localhost:5000/", "title": "All Options", "method": "OPTIONS", "name": "all_options" } ], "properties": {} } ``` We have full CRUD+L (Create, Retrieve, Update, Delete and List) operations: a POST to ``/user/`` creates a new user, a GET to ``/user/`` returns a list of all users, a GET on ``/user/<id>/`` returns an individual user and so forth. ## Tutorial This tutorial uses a powerful and extensible ReST framework called [ripozo](https://github.com/vertical-knowledge/ripozo) and a couple packages in the ripozo ecosystem: [flask-ripozo](https://github.com/vertical-knowledge/flask-ripozo) and [ripozo-sqlalchemy](https://github.com/vertical-knowledge/ripozo-sqlalchemy). Ripozo is web framework independent, meaning you can use it in any desired web framework. Official integrations include [flask-ripozo](https://github.com/vertical-knowledge/flask-ripozo) and [django-ripozo](https://github.com/vertical-knowledge/django-ripozo) with more to come. In addition to building seamless ReSTful API's, ripozo can expose [Hypermedia/HATEOAS](http://roy.gbiv.com/untangled/2008/rest-apis-must-be-hypertext-driven) driven API's with *no additional effort*. 
### Step 1: Create the Flask App The first step is to setup our [Flask](https://github.com/mitsuhiko/flask) application. You can use [django-ripozo](https://github.com/vertical-knowledge/django-ripozo) with minimal deviations from this tutorial. Unfortunately, bootstrapping a django project requires more than 10 lines of code. First, install Flask. ```bash pip install Flask ``` Now instantiate a Flask Application. ```python from flask import Flask app = Flask(__name__) ``` ### Step 2: Setup SQLAlchemy [SQLAlchemy](http://www.sqlalchemy.org/) is another favorite tool of mine. It provides an excellent ORM and allows us to generate an ORM from an existing database with no additional work. ```bash pip install SQLAlchemy ``` Creating a sqlalchemy engine is incredibly simple. We simply pass SQLAlchemy a database URI in the expected format. See the [Engine Configuration documentation](http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html) for more details. TL;DR this is the general format: `'dialect+driver://username:password@host:port/database_name'` ```python from sqlalchemy import create_engine from sqlalchemy.ext.automap import automap_base database_uri = 'mysql://localhost:3306/mydatabase' engine = create_engine(database_uri) base = automap_base() base.prepare(engine, reflect=True) ``` This creates an ORM where we can access tables in the database as python objects. ### Step 3: Bind ripozo Now that we have our database and web application working, we need to bind [ripozo](https://github.com/vertical-knowledge/ripozo) to Flask and SQLAlchemy. ```bash pip install ripozo flask-ripozo ripozo-sqlalchemy ``` ```python from flask_ripozo import FlaskDispatcher from ripozo import adapters from ripozo_sqlalchemy import ScopedSessionHandler # Attach the previously create Flask application dispatcher = FlaskDispatcher(app) # Adapters inform ripozo how to represent the resources over HTTP (typically a # protocol for a JSON response). 
In this case, we've chosen the Hal and SIREN protocols. dispatcher.register_adapters(adapters.SirenAdapter, adapters.HalAdapter) # Create a session handler to cleanly handle database transactions and cleanup session_handler = ScopedSessionHandler(engine) ``` ### Step 4: Expose our database Now that we have completed all necessary setup, we can expose our database as a ReSTful API. ```python # The create_resource method is a shortcut for creating ripozo resources # containing common sets of endpoints. # We need to pass append_slash=True due to a quirk in how flask handles routing resources = [create_resource(model, session_handler, append_slash=True) for model in base.classes] # Register the resources with the adapter to expose them in the API. dispatcher.register_resources(*resources) # and now we run our Flask app app.run() ``` The ``create_resource`` method is highly customizable. Additionally, you can use a declarative, class based implementation that is incredibly flexible. In fact, the ``create_resource`` method uses the declarative implementation under the covers. ### Step 5: Putting it all together Now that we have everything we need, let's put it all together into one function. ```python from flask import Flask from flask_ripozo import FlaskDispatcher from ripozo import adapters from ripozo_sqlalchemy import ScopedSessionHandler, create_resource from sqlalchemy.ext.automap import automap_base from sqlalchemy import create_engine def create_app(database_uri): app = Flask(__name__) engine = create_engine(database_uri) base = automap_base() base.prepare(engine, reflect=True) dispatcher = FlaskDispatcher(app) dispatcher.register_adapters(adapters.HalAdapter, adapters.SirenAdapter) session_handler = ScopedSessionHandler(engine) resources = [create_resource(model, session_handler, append_slash=True) for model in base.classes] dispatcher.register_resources(*resources) app.run() ``` And just like that we have exposed our database as a ReSTful API. 
Additionally, because this is [ripozo](https://github.com/vertical-knowledge/ripozo), if a table has a relationship to another table, a link to the corresponding row/resource is automatically generated. For example, consider a user table that has a Many-to-One relationship with a group table. When we go to a user's endpoint, we'll get a fully qualified URL linking to the associated group's endpoint. ## Conclusion We can see how easy [ripozo](https://github.com/vertical-knowledge/ripozo) makes creating ReSTful API's. With [ripozo](https://github.com/vertical-knowledge/ripozo), you can include authentication and authorization, additional endpoints and much more. [Ripozo](https://github.com/vertical-knowledge/ripozo) is designed to be flexible and efficient at the same time. It provides shortcuts while prioritizing extensibility. It unleashes more flexibility and power than any other Hypermedia/HATEOAS ReSTful framework. Ripozo: less effort, better APIs.
/ripozo-oasis-1.0.1.tar.gz/ripozo-oasis-1.0.1/README.md
0.516595
0.916931
README.md
pypi
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from ripozo import Relationship, ListRelationship
from ripozo.resources.constructor import ResourceMetaClass
from ripozo.resources.restmixins import CRUDL

from ripozo_sqlalchemy import AlchemyManager

from sqlalchemy.inspection import inspect
# NOTE(review): class_mapper appears unused in this module — confirm before removing.
from sqlalchemy.orm import class_mapper, RelationshipProperty


def _get_fields_for_model(model):
    """
    Gets all of the fields on the model.

    Relationship attributes are expanded into dotted names, one per
    primary-key column of the related model (e.g. ``'group.id'``), so
    that the generated resource exposes a link target rather than the
    raw related object.

    :param DeclarativeModel model: A SQLAlchemy ORM Model
    :return: A tuple of the fields on the Model corresponding
        to the columns on the Model.
    :rtype: tuple
    """
    fields = []
    # _sa_class_manager iterates every mapped attribute (columns and
    # relationships alike) registered on the model class.
    for name in model._sa_class_manager:
        prop = getattr(model, name)
        if isinstance(prop.property, RelationshipProperty):
            # One dotted field per primary-key column of the related model.
            for pk in prop.property.mapper.primary_key:
                fields.append('{0}.{1}'.format(name, pk.name))
        else:
            fields.append(name)
    return tuple(fields)


def _get_pks(model):
    """
    Gets a tuple of the primary keys on the model.

    :param DeclarativeMeta model: The SQLAlchemy ORM model.
    :return: tuple of unicode primary key column names
    :rtype: tuple
    """
    return tuple([key.name for key in inspect(model).primary_key])


def _get_relationships(model):
    """
    Gets the necessary relationships for the resource
    by inspecting the sqlalchemy model for relationships.

    One-to-many/many-to-many relationships (``uselist``) become
    ``ListRelationship`` instances; scalar relationships become
    plain ``Relationship`` instances.

    :param DeclarativeMeta model: The SQLAlchemy ORM model.
    :return: A tuple of Relationship/ListRelationship instances
        corresponding to the relationships on the Model.
    :rtype: tuple
    """
    relationships = []
    for name, relationship in inspect(model).relationships.items():
        class_ = relationship.mapper.class_
        if relationship.uselist:
            rel = ListRelationship(name, relation=class_.__name__)
        else:
            rel = Relationship(name, relation=class_.__name__)
        relationships.append(rel)
    return tuple(relationships)


def create_resource(model, session_handler, resource_bases=(CRUDL,),
                    relationships=None, links=None, preprocessors=None,
                    postprocessors=None, fields=None, paginate_by=100,
                    auto_relationships=True, pks=None, create_fields=None,
                    update_fields=None, list_fields=None, append_slash=False):
    """
    Creates a ResourceBase subclass by inspecting a SQLAlchemy
    Model. This is somewhat more restrictive than explicitly
    creating managers and resources. However, if you only need
    any of the basic CRUD+L operations it removes nearly all of
    the boilerplate.

    :param sqlalchemy.Model model: This is the model that will be inspected
        to create a Resource and Manager from.  By default, all of its fields
        will be exposed, although this can be overridden using the ``fields``
        attribute.
    :param ripozo_sqlalchemy.SessionHandler|ripozo_sqlalchemy.ScopedSessionHandler session_handler:
        A session handler to use when instantiating an instance of the Manager
        class created from the model.  This is responsible for getting and
        handling sessions in both normal cases and exceptions.
    :param tuple resource_bases: A tuple of ResourceBase subclasses.
        Defaults to the restmixins.CRUDL class only.  However if you only
        wanted Update and Delete you could pass in
        ``(restmixins.Update, restmixins.Delete)`` which would cause the
        resource to inherit from those two.  Additionally, you could create
        your own mixins and pass them in as the resource_bases.
    :param tuple relationships: Extra relationships to pass into the
        ResourceBase constructor.  If ``auto_relationships`` is True, the
        inspected relationships will be appended to these.
    :param tuple links: Extra links to pass into the ResourceBase as the
        class ``_links`` attribute.  Defaults to an empty tuple.
    :param tuple preprocessors: Preprocessors for the resource class attribute.
    :param tuple postprocessors: Postprocessors for the resource class attribute.
    :param tuple fields: The fields to expose on the api.  Defaults to all of
        the fields on the model.
    :param int paginate_by: The number of results returned per page by the
        list endpoint.  Defaults to 100.
    :param bool auto_relationships: If True, then the SQLAlchemy Model will
        be inspected for relationships and they will be automatically appended
        to the relationships on the resource class attribute.
    :param tuple pks: The primary keys for the resource.  Defaults to the
        primary-key columns inspected from the model.
    :param list create_fields: A list of the fields that are valid when
        creating a resource.  By default this will be the fields without
        any primary keys included.
    :param list update_fields: A list of the fields that are valid when
        updating a resource.  By default this will be the fields without
        any primary keys included.
    :param list list_fields: A list of the fields that will be returned
        when the list endpoint is requested.  Defaults to the ``fields``
        attribute.
    :param bool append_slash: A flag to forcibly append slashes to the end
        of urls.
    :return: A ResourceBase subclass backed by a freshly created
        AlchemyManager subclass.
    :rtype: ResourceMetaClass
    """
    relationships = relationships or tuple()
    if auto_relationships:
        relationships += _get_relationships(model)
    links = links or tuple()
    preprocessors = preprocessors or tuple()
    postprocessors = postprocessors or tuple()
    pks = pks or _get_pks(model)
    fields = fields or _get_fields_for_model(model)
    list_fields = list_fields or fields

    # Primary keys are server-assigned, so they are excluded from the
    # default create/update field sets.
    create_fields = create_fields or [x for x in fields if x not in set(pks)]
    update_fields = update_fields or [x for x in fields if x not in set(pks)]

    # Build a one-off AlchemyManager subclass for this model...
    manager_cls_attrs = dict(paginate_by=paginate_by, fields=fields, model=model,
                             list_fields=list_fields, create_fields=create_fields,
                             update_fields=update_fields)
    manager_class = type(str(model.__name__), (AlchemyManager,), manager_cls_attrs)
    manager = manager_class(session_handler)

    # ...then a ResourceBase subclass (via ResourceMetaClass, which also
    # registers the resource) that uses it.
    resource_cls_attrs = dict(preprocessors=preprocessors,
                              postprocessors=postprocessors,
                              _relationships=relationships, _links=links,
                              pks=pks, manager=manager,
                              append_slash=append_slash)
    res_class = ResourceMetaClass(str(model.__name__), resource_bases, resource_cls_attrs)
    return res_class
/ripozo-sqlalchemy-1.0.2.tar.gz/ripozo-sqlalchemy-1.0.2/ripozo_sqlalchemy/easy_resource.py
0.79649
0.591841
easy_resource.py
pypi
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from datetime import datetime, date, time, timedelta
from decimal import Decimal
from functools import wraps

from ripozo.exceptions import NotFoundException
from ripozo.manager_base import BaseManager
from ripozo.resources.fields.base import BaseField
from ripozo.resources.fields.common import StringField, IntegerField,\
    FloatField, DateTimeField, BooleanField
from ripozo.utilities import make_json_safe

from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.query import Query

import logging
import six

_logger = logging.getLogger(__name__)

# Maps a column's python type to the ripozo field class used to
# translate/validate incoming request values for that column.
_COLUMN_FIELD_MAP = {
    six.text_type: StringField,
    six.binary_type: StringField,
    int: IntegerField,
    float: FloatField,
    Decimal: FloatField,
    datetime: DateTimeField,
    date: DateTimeField,
    timedelta: DateTimeField,
    time: DateTimeField,
    bool: BooleanField,
}


def db_access_point(func):
    """
    Wraps a function that actually accesses the database.
    It injects a session into the method and attempts to handle
    it after the function has run.

    On success the session handler's ``handle_session`` is called with
    the session alone; on failure it is called with the exception as
    well (so it can roll back) and the exception is re-raised.

    :param method func: The method that is interacting with the database.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        """Wrapper responsible for handling sessions"""
        session = self.session_handler.get_session()
        try:
            resp = func(self, session, *args, **kwargs)
        except Exception as exc:
            self.session_handler.handle_session(session, exc=exc)
            # NOTE(review): `raise exc` is the py2-compatible re-raise; on
            # py3-only code a bare `raise` would preserve the traceback better.
            raise exc
        else:
            self.session_handler.handle_session(session)
            return resp
    return wrapper


class AlchemyManager(BaseManager):
    """
    This is the Manager that interoperates between ripozo
    and sqlalchemy. It provides a series of convenience
    functions primarily for basic CRUD. This class can
    be extended as necessary and it is recommended
    that direct database access should be performed
    in a manager.

    :param bool all_fields: If this is true, then all fields
        on the model will be used. The model will be inspected
        to get the fields.
    """
    # Query-string argument naming the requested page (one-based).
    pagination_pk_query_arg = 'page'
    all_fields = False
    fields = tuple()
    # NOTE(review): attributes such as `model`, `paginate_by`, `list_fields`,
    # `create_fields`, `update_fields`, `pagination_count_query_arg` and
    # `dot_field_list_to_dict` come from BaseManager / subclass configuration
    # (see ripozo.manager_base) — confirm there when extending.

    def __init__(self, session_handler, *args, **kwargs):
        super(AlchemyManager, self).__init__(*args, **kwargs)
        self._field_dict = None
        # Supplies sessions for every @db_access_point-decorated method.
        self.session_handler = session_handler

    @staticmethod
    def _get_field_python_type(model, name):
        """
        Gets the python type for the attribute on the model
        with the name provided.

        :param Model model: The SqlAlchemy model class.
        :param unicode name: The column name on the model
            that you are attempting to get the python type.
        :return: The python type of the column
        :rtype: type
        """
        try:
            return getattr(model, name).property.columns[0].type.python_type
        except AttributeError:  # It's a relationship
            # Dotted names ("relation.pk") recurse into the related model.
            parts = name.split('.')
            model = getattr(model, parts.pop(0)).comparator.mapper.class_
            return AlchemyManager._get_field_python_type(model, '.'.join(parts))
        except NotImplementedError:
            # This is for pickle type columns, which have no python_type.
            return object

    @classmethod
    def get_field_type(cls, name):
        """
        Takes a field name and gets an appropriate BaseField instance
        for that column.  It inspects the Model that is set on the manager
        to determine what the BaseField subclass should be.

        :param unicode name:
        :return: A BaseField subclass that is appropriate for
            translating a string input into the appropriate format.
        :rtype: ripozo.viewsets.fields.base.BaseField
        """
        python_type = cls._get_field_python_type(cls.model, name)
        if python_type in _COLUMN_FIELD_MAP:
            field_class = _COLUMN_FIELD_MAP[python_type]
            return field_class(name)
        # Unknown types fall back to the untyped base field.
        return BaseField(name)

    @db_access_point
    def create(self, session, values, *args, **kwargs):
        """
        Creates a new instance of the self.model
        and persists it to the database.

        :param dict values: The dictionary of values to
            set on the model.  The key is the column name
            and the value is what it will be set to.  If
            cls.create_fields is defined then it will use those
            fields.  Otherwise, it will use the fields defined
            in cls.fields.
        :param Session session: The sqlalchemy session
        :return: The serialized model.  It will use the self.fields
            attribute for this.
        :rtype: dict
        """
        model = self.model()
        model = self._set_values_on_model(model, values, fields=self.create_fields)
        session.add(model)
        session.commit()
        return self.serialize_model(model)

    @db_access_point
    def retrieve(self, session, lookup_keys, *args, **kwargs):
        """
        Retrieves a model using the lookup keys provided.
        Only one model should be returned by the lookup_keys
        or else the manager will fail.

        :param Session session: The SQLAlchemy session to use
        :param dict lookup_keys: A dictionary mapping the fields
            and their expected values
        :return: The dictionary of keys and values for the retrieved
            model.  The only values returned will be those specified by
            the fields attribute on the class
        :rtype: dict
        :raises: NotFoundException
        """
        model = self._get_model(lookup_keys, session)
        return self.serialize_model(model)

    @db_access_point
    def retrieve_list(self, session, filters, *args, **kwargs):
        """
        Retrieves a list of the model for this manager.
        It is restricted by the filters provided.

        :param Session session: The SQLAlchemy session to use
        :param dict filters: The filters to restrict the returned
            models on.  Pagination arguments are popped out of this
            dict before it is applied to the query.
        :return: A tuple of the list of dictionary representations of
            the models and the dictionary of meta data (pagination links)
        :rtype: list, dict
        """
        query = self.queryset(session)
        # IntegerField coerces the (possibly string) query args to ints.
        translator = IntegerField('tmp')
        pagination_count = translator.translate(
            filters.pop(self.pagination_count_query_arg, self.paginate_by)
        )
        pagination_pk = translator.translate(
            filters.pop(self.pagination_pk_query_arg, 1)
        )
        pagination_pk -= 1  # The public page arg is one-based; the offset math below is zero-based.

        query = query.filter_by(**filters)

        if pagination_pk:
            query = query.offset(pagination_pk * pagination_count)

        if pagination_count:
            # Fetch one extra row so we can tell whether a next page exists.
            query = query.limit(pagination_count + 1)

        count = query.count()
        next_link = None
        previous_link = None
        if count > pagination_count:
            next_link = {self.pagination_pk_query_arg: pagination_pk + 2,
                         self.pagination_count_query_arg: pagination_count}
        if pagination_pk > 0:
            previous_link = {self.pagination_pk_query_arg: pagination_pk,
                             self.pagination_count_query_arg: pagination_count}
        field_dict = self.dot_field_list_to_dict(self.list_fields)
        # Slice off the sentinel row so at most pagination_count items are returned.
        props = self.serialize_model(query[:pagination_count], field_dict=field_dict)
        meta = dict(links=dict(next=next_link, previous=previous_link))
        return props, meta

    @db_access_point
    def update(self, session, lookup_keys, updates, *args, **kwargs):
        """
        Updates the model with the specified lookup_keys and returns
        the dictified object.

        :param Session session: The SQLAlchemy session to use
        :param dict lookup_keys: A dictionary mapping the fields
            and their expected values
        :param dict updates: The columns and the values to update
            them to.
        :return: The dictionary of keys and values for the retrieved
            model.  The only values returned will be those specified by
            the fields attribute on the class
        :rtype: dict
        :raises: NotFoundException
        """
        model = self._get_model(lookup_keys, session)
        model = self._set_values_on_model(model, updates, fields=self.update_fields)
        session.commit()
        return self.serialize_model(model)

    @db_access_point
    def delete(self, session, lookup_keys, *args, **kwargs):
        """
        Deletes the model found using the lookup_keys.

        :param Session session: The SQLAlchemy session to use
        :param dict lookup_keys: A dictionary mapping the fields
            and their expected values
        :return: An empty dictionary
        :rtype: dict
        :raises: NotFoundException
        """
        model = self._get_model(lookup_keys, session)
        session.delete(model)
        session.commit()
        return {}

    def queryset(self, session):
        """
        The queryset to use when looking for models.

        This is advantageous to override if you only
        want a subset of the model specified.
        """
        return session.query(self.model)

    def serialize_model(self, model, field_dict=None):
        """
        Takes a model and serializes the fields provided into
        a dictionary.

        :param Model model: The Sqlalchemy model instance to serialize
        :param dict field_dict: The dictionary of fields to return.
        :return: The serialized model.
        :rtype: dict
        """
        response = self._serialize_model_helper(model, field_dict=field_dict)
        # make_json_safe converts values (dates, Decimals, ...) into
        # json-serializable equivalents.
        return make_json_safe(response)

    def _serialize_model_helper(self, model, field_dict=None):
        """
        A recursive function for serializing a model
        into a json ready format.
        """
        field_dict = field_dict or self.dot_field_list_to_dict()
        if model is None:
            return None

        # A Query is materialized first; collections are serialized
        # element by element.
        if isinstance(model, Query):
            model = model.all()

        if isinstance(model, (list, set)):
            return [self.serialize_model(m, field_dict=field_dict) for m in model]

        model_dict = {}
        for name, sub in six.iteritems(field_dict):
            value = getattr(model, name)
            if sub:
                # A non-empty sub-dict means this attribute is a related
                # model that is serialized recursively.
                value = self.serialize_model(value, field_dict=sub)
            model_dict[name] = value
        return model_dict

    def _get_model(self, lookup_keys, session):
        """
        Gets the sqlalchemy Model instance associated with
        the lookup keys.

        :param dict lookup_keys: A dictionary of the keys and their
            associated values.
        :param Session session: The sqlalchemy session
        :return: The sqlalchemy orm model instance.
        :raises: NotFoundException when no row matches the lookup keys.
        """
        try:
            return self.queryset(session).filter_by(**lookup_keys).one()
        except NoResultFound:
            raise NotFoundException('No model of type {0} was found using '
                                    'lookup_keys {1}'.format(self.model.__name__, lookup_keys))

    def _set_values_on_model(self, model, values, fields=None):
        """
        Updates the model with the specified values.

        Keys in ``values`` that are not listed in ``fields`` are
        silently ignored.

        :param Model model: The sqlalchemy model instance
        :param dict values: The dictionary of attributes and the values
            to set.
        :param list fields: A list of strings indicating the valid fields.
            Defaults to self.fields.
        :return: The model with the updated values
        :rtype: Model
        """
        fields = fields or self.fields
        for name, val in six.iteritems(values):
            if name not in fields:
                continue
            setattr(model, name, val)
        return model
/ripozo-sqlalchemy-1.0.2.tar.gz/ripozo-sqlalchemy-1.0.2/ripozo_sqlalchemy/alchemymanager.py
0.828592
0.20268
alchemymanager.py
pypi
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from sqlalchemy.orm import sessionmaker, scoped_session


class ScopedSessionHandler(object):
    """
    A ScopedSessionHandler is injected into the AlchemyManager in order
    to get and handle sessions after a database access.

    There are two required methods for any session handler: it must have
    a ``get_session`` method that returns a session for the manager to
    use, and a ``handle_session`` method that is called after the
    database access completes (successfully or with an exception).
    """

    def __init__(self, engine):
        """
        Initializes the ScopedSessionHandler which is responsible for
        getting sessions and closing them after a database access.

        :param Engine engine: A SQLAlchemy engine.
        """
        self.engine = engine
        # scoped_session provides thread-local sessions so that
        # concurrent requests never share a Session instance.
        self.session_maker = scoped_session(sessionmaker(bind=self.engine))

    def get_session(self):
        """
        Gets an individual session.

        :return: The session object.
        :rtype: Session
        """
        return self.session_maker()

    @staticmethod
    def handle_session(session, exc=None):
        """
        Handles closing a session, rolling it back first if the database
        access raised an exception.

        :param Session session: The session to close.
        :param Exception exc: The exception raised during the database
            access, if any, else None.
        """
        if exc:
            session.rollback()
        session.close()


class SessionHandler(object):
    """
    A pass-through session handler: it hands back the session it was
    constructed with and only rolls back on errors.  This is helpful in
    Flask-SQLAlchemy for example, where session creation and cleanup are
    already under the extension's control.
    """

    def __init__(self, session):
        """
        :param Session session: The session to pass to the Manager.
            This is what will be directly used by the application.
        """
        self.session = session

    def get_session(self):
        """
        Gets the session.

        :return: The session for the manager.
        :rtype: Session
        """
        return self.session

    @staticmethod
    def handle_session(session, exc=None):
        """
        Rolls back the session if appropriate.  The session is
        deliberately left open, since its lifecycle is managed elsewhere.

        :param Session session: The session in use.
        :param Exception exc: The exception raised during the database
            access, if any, else None.
        """
        if exc:
            session.rollback()
/ripozo-sqlalchemy-1.0.2.tar.gz/ripozo-sqlalchemy-1.0.2/ripozo_sqlalchemy/session_handlers.py
0.792263
0.166981
session_handlers.py
pypi
from os.path import abspath, dirname, join

import numpy as np
import pandas as pd
from scipy.fftpack import next_fast_len
from scipy.io import loadmat
from scipy.ndimage import gaussian_filter1d
from scipy.signal import filtfilt, hilbert, remez


def ripple_bandpass_filter(sampling_frequency):
    """Designs a 150-250 Hz bandpass FIR filter for ripple detection.

    Parameters
    ----------
    sampling_frequency : float
        Samples per second of the signal to be filtered.

    Returns
    -------
    numerator : ndarray, shape (101,)
        FIR filter taps designed with the Remez exchange algorithm.
    denominator : float
        Always 1.0 (FIR filters have a trivial denominator), so the
        result can be passed directly to `scipy.signal.filtfilt`.
    """
    ORDER = 101
    nyquist = 0.5 * sampling_frequency
    TRANSITION_BAND = 25
    RIPPLE_BAND = [150, 250]
    desired = [
        0,
        RIPPLE_BAND[0] - TRANSITION_BAND,
        RIPPLE_BAND[0],
        RIPPLE_BAND[1],
        RIPPLE_BAND[1] + TRANSITION_BAND,
        nyquist,
    ]
    # FIX: the `Hz` keyword was deprecated in SciPy 1.1 and removed in
    # SciPy 1.12; `fs` is the supported spelling of the same argument.
    return remez(ORDER, desired, [0, 1, 0], fs=sampling_frequency), 1.0


def _get_series_start_end_times(series):
    """Extracts the start and end times of segments defined by a boolean
    pandas Series.

    Parameters
    ----------
    series : pandas boolean Series (n_time,)
        Consecutive Trues define each segment.

    Returns
    -------
    start_times : ndarray, shape (n_segments,)
        Beginning time of each segment based on the index of the series.
    end_times : ndarray, shape (n_segments,)
        End time of each segment based on the index of the series.
    """
    # A start is a True whose predecessor is False (or missing).
    is_start_time = (~series.shift(1).fillna(False)) & series
    start_times = np.asarray(series.index[is_start_time])

    # An end is a True whose successor is False (or missing).
    is_end_time = series & (~series.shift(-1).fillna(False))
    end_times = np.asarray(series.index[is_end_time])

    return start_times, end_times


def segment_boolean_series(series, minimum_duration=0.015):
    """Returns a list of tuples where each tuple contains the start time of
    a segment and end time of the segment.  It takes a boolean pandas series
    as input where the index is time.

    Parameters
    ----------
    series : pandas boolean Series (n_time,)
        Consecutive Trues define each segment.
    minimum_duration : float, optional
        Segments must be at least this duration to be included.

    Returns
    -------
    segments : list of 2-element tuples
    """
    start_times, end_times = _get_series_start_end_times(series)

    return [
        (start_time, end_time)
        for start_time, end_time in zip(start_times, end_times)
        if end_time >= (start_time + minimum_duration)
    ]


def filter_ripple_band(data):
    """Returns a bandpass filtered signal between 150-250 Hz.

    Uses the pre-computed Frank-lab kernel (see
    `_get_ripplefilter_kernel`), which assumes a 1500 Hz sampling rate.

    Parameters
    ----------
    data : array_like, shape (n_time, n_signals)
        NOTE(review): the NaN masking below (`axis=-1`, boolean row
        indexing) is written for 2D input — confirm before passing 1D.

    Returns
    -------
    filtered_data : array_like, same shape as `data`
        Rows containing any NaN are left as NaN.
    """
    filter_numerator, filter_denominator = _get_ripplefilter_kernel()
    is_nan = np.any(np.isnan(data), axis=-1)
    filtered_data = np.full_like(data, np.nan)
    # filtfilt applies the FIR kernel forward and backward for zero phase.
    filtered_data[~is_nan] = filtfilt(
        filter_numerator, filter_denominator, data[~is_nan], axis=0
    )
    return filtered_data


def _get_ripplefilter_kernel():
    """Returns the pre-computed ripple filter kernel from the Frank lab.

    The kernel is 150-250 Hz bandpass with 40 db roll off and 10 Hz
    sidebands.  Sampling frequency is 1500 Hz.
    """
    filter_file = join(abspath(dirname(__file__)), "ripplefilter.mat")
    ripplefilter = loadmat(filter_file)
    return ripplefilter["ripplefilter"]["kernel"][0][0].flatten(), 1


def extend_threshold_to_mean(
    is_above_mean, is_above_threshold, time, minimum_duration=0.015
):
    """Extract segments above threshold if they remain above the threshold
    for a minimum amount of time and extend them to the mean.

    Parameters
    ----------
    is_above_mean : ndarray, shape (n_time,)
        Time series indicator function specifying when the
        time series is above the mean.
    is_above_threshold : ndarray, shape (n_time,)
        Time series indicator function specifying when the
        time series is above the threshold.
    time : ndarray, shape (n_time,)
    minimum_duration : float, optional
        Minimum segment duration for both indicator series.

    Returns
    -------
    candidate_ripple_times : list of 2-element tuples
        Each tuple is the start and end time of the candidate ripple,
        sorted by start time.
    """
    is_above_threshold = pd.Series(is_above_threshold, index=time)
    is_above_mean = pd.Series(is_above_mean, index=time)
    above_mean_segments = segment_boolean_series(
        is_above_mean, minimum_duration=minimum_duration
    )
    above_threshold_segments = segment_boolean_series(
        is_above_threshold, minimum_duration=minimum_duration
    )
    # Each above-threshold segment is widened to the above-mean segment
    # that contains it.
    return sorted(_extend_segment(above_threshold_segments, above_mean_segments))


def exclude_movement(candidate_ripple_times, speed, time, speed_threshold=4.0):
    """Removes candidate ripples if the animal is moving.

    Parameters
    ----------
    candidate_ripple_times : array_like, shape (n_ripples, 2)
    speed : ndarray, shape (n_time,)
        Speed of animal during recording session.
    time : ndarray, shape (n_time,)
        Time in recording session.
    speed_threshold : float, optional
        Maximum speed for animal to be considered to be moving.

    Returns
    -------
    ripple_times : ndarray, shape (n_ripples, 2)
        Ripple times where the animal is not moving.
    """
    candidate_ripple_times = np.array(candidate_ripple_times)
    try:
        # Look up the speed at the exact start and end sample of each ripple.
        speed_at_ripple_start = speed[np.in1d(time, candidate_ripple_times[:, 0])]
        speed_at_ripple_end = speed[np.in1d(time, candidate_ripple_times[:, 1])]

        is_below_speed_threshold = (speed_at_ripple_start <= speed_threshold) & (
            speed_at_ripple_end <= speed_threshold
        )
        return candidate_ripple_times[is_below_speed_threshold]
    except IndexError:
        # No candidates (empty array cannot be column-indexed).
        return []


def _find_containing_interval(interval_candidates, target_interval):
    """Returns the interval that contains the target interval out of a list
    of interval candidates.

    This is accomplished by finding the closest start time out of the
    candidate intervals, since we already know that one interval candidate
    contains the target interval (the segments above the mean contain the
    segments above the threshold).
    """
    candidate_start_times = np.asarray(interval_candidates)[:, 0]
    # Cast 0 to the index dtype so datetime-like indexes compare cleanly.
    zero = np.array(0).astype(candidate_start_times.dtype)
    closest_start_ind = np.max(
        (candidate_start_times - target_interval[0] <= zero).nonzero()
    )
    return interval_candidates[closest_start_ind]


def _extend_segment(segments_to_extend, containing_segments):
    """Extends the boundaries of a segment if it is a subset of one of the
    containing segments.

    Parameters
    ----------
    segments_to_extend : list of 2-element tuples
        Elements are the start and end times.
    containing_segments : list of 2-element tuples
        Elements are the start and end times.

    Returns
    -------
    extended_segments : list of 2-element tuples
    """
    segments = [
        _find_containing_interval(containing_segments, segment)
        for segment in segments_to_extend
    ]
    return list(set(segments))  # remove duplicate segments


def get_envelope(data, axis=0):
    """Extracts the instantaneous amplitude (envelope) of an analytic
    signal using the Hilbert transform."""
    n_samples = data.shape[axis]
    # Padding to the next fast FFT length speeds up the transform; the
    # result is then trimmed back to the original number of samples.
    instantaneous_amplitude = np.abs(
        hilbert(data, N=next_fast_len(n_samples), axis=axis)
    )
    return np.take(instantaneous_amplitude, np.arange(n_samples), axis=axis)


def gaussian_smooth(data, sigma, sampling_frequency, axis=0, truncate=8):
    """1D convolution of the data with a Gaussian.

    The standard deviation of the gaussian is in the units of the sampling
    frequency.  The function is just a wrapper around scipy's
    `gaussian_filter1d`.  The support is truncated at 8 by default, instead
    of 4 in `gaussian_filter1d`.

    Parameters
    ----------
    data : array_like
    sigma : float
        Standard deviation in seconds; converted to samples below.
    sampling_frequency : int
    axis : int, optional
    truncate : int, optional

    Returns
    -------
    smoothed_data : array_like
    """
    return gaussian_filter1d(
        data, sigma * sampling_frequency, truncate=truncate, axis=axis, mode="constant"
    )


def threshold_by_zscore(zscored_data, time, minimum_duration=0.015, zscore_threshold=2):
    """Finds times when a z-scored signal exceeds a threshold, extended to
    where the signal crosses its mean.

    Parameters
    ----------
    zscored_data : array_like, shape (n_time,)
        Signal that has already been z-scored (mean 0, std 1).
    time : ndarray, shape (n_time,)
    minimum_duration : float, optional
        Minimum time the signal must stay above threshold/mean.
    zscore_threshold : int, optional
        Number of standard deviations required.

    Returns
    -------
    candidate_ripple_times : list of 2-element tuples
        Start and end times of candidate events (the original docstring's
        claim of a DataFrame return was incorrect).
    """
    is_above_mean = zscored_data >= 0
    is_above_threshold = zscored_data >= zscore_threshold

    return extend_threshold_to_mean(
        is_above_mean, is_above_threshold, time, minimum_duration=minimum_duration
    )


def merge_overlapping_ranges(ranges):
    """Merge overlapping and adjacent ranges

    Parameters
    ----------
    ranges : iterable with 2-elements
        Element 1 is the start of the range.
        Element 2 is the end of the range.

    Yields
    -------
    sorted_merged_range : 2-element tuple
        Element 1 is the start of the merged range.
        Element 2 is the end of the merged range.

    >>> list(merge_overlapping_ranges([(5, 7), (3, 5), (-1, 3)]))
    [(-1, 7)]
    >>> list(merge_overlapping_ranges([(5, 6), (3, 4), (1, 2)]))
    [(1, 2), (3, 4), (5, 6)]
    >>> list(merge_overlapping_ranges([]))
    []

    References
    ----------
    .. [1] http://codereview.stackexchange.com/questions/21307/consolidate-
    list-of-ranges-that-overlap

    """
    ranges = iter(sorted(ranges))
    try:
        current_start, current_stop = next(ranges)
    except StopIteration:
        return None
    for start, stop in ranges:
        if start > current_stop:
            # Gap between segments: output current segment and start a new
            # one.
            yield current_start, current_stop
            current_start, current_stop = start, stop
        else:
            # Segments adjacent or overlapping: merge.
            current_stop = max(current_stop, stop)
    yield current_start, current_stop


def exclude_close_events(candidate_event_times, close_event_threshold=1.0):
    """Excludes successive events that occur within a `close_event_threshold`
    of a previously occurring event.

    Parameters
    ----------
    candidate_event_times : ndarray or list, shape (n_events, 2)
        Start and end times of possible events.
    close_event_threshold : float or np.timedelta

    Returns
    -------
    candidate_event_times : ndarray, shape (n_events - too_close_events, 2)
    """
    candidate_event_times = np.array(candidate_event_times)
    n_events = candidate_event_times.shape[0]
    # Indices of surviving events, in terms of the original ordering.
    new_event_index = np.arange(n_events)
    new_event_times = candidate_event_times.copy()

    for ind, (start_time, end_time) in enumerate(candidate_event_times):
        if np.isin(ind, new_event_index):
            # Drop any LATER event that starts within the threshold of this
            # event's end.
            is_too_close = (
                end_time + close_event_threshold > new_event_times[:, 0]
            ) & (new_event_index > ind)
            new_event_index = new_event_index[~is_too_close]
            new_event_times = new_event_times[~is_too_close]

    return new_event_times if new_event_times.size > 0 else []


def get_multiunit_population_firing_rate(
    multiunit, sampling_frequency, smoothing_sigma=0.015
):
    """Calculates the multiunit population firing rate.

    Parameters
    ----------
    multiunit : ndarray, shape (n_time, n_signals)
        Binary array of multiunit spike times.
    sampling_frequency : float
        Number of samples per second.
    smoothing_sigma : float or np.timedelta
        Amount to smooth the firing rate over time.  The default is
        given assuming time is in units of seconds.

    Returns
    -------
    multiunit_population_firing_rate : ndarray, shape (n_time,)
    """
    # Summing across signals gives spikes per sample; multiplying by the
    # sampling frequency converts to spikes per second.
    return gaussian_smooth(
        multiunit.sum(axis=1) * sampling_frequency, smoothing_sigma, sampling_frequency
    )
/ripple_detection-1.4.0-py3-none-any.whl/ripple_detection/core.py
0.818338
0.547646
core.py
pypi
import numpy as np
from scipy.stats import norm

RIPPLE_FREQUENCY = 200


def simulate_time(n_samples, sampling_frequency):
    """Return a time axis of `n_samples` points spaced at
    1 / `sampling_frequency` seconds, starting at zero."""
    sample_indices = np.arange(n_samples)
    return sample_indices / sampling_frequency


def mean_squared(x):
    """Average power of the signal `x` (mean of squared magnitudes)."""
    return np.mean(np.abs(x) ** 2.0)


def normalize(y, x=None):
    """Rescale `y` so its mean power matches that of `x`.

    When `x` is omitted the target power is 1.0, i.e. that of a
    standard-normal white-noise signal.

    https://github.com/python-acoustics/python-acoustics/tree/master/acoustics
    """
    if x is not None:
        target_power = mean_squared(x)
    else:
        target_power = 1.0
    return y * np.sqrt(target_power / mean_squared(y))


def pink(N, state=None):
    """
    Pink noise.

    :param N: Amount of samples.
    :param state: State of PRNG.
    :type state: :class:`np.random.RandomState`

    Pink noise has equal power in proportionally wide bands; its power
    density falls off at 3 dB per octave (spectrum scaled by 1/sqrt(f)).

    https://github.com/python-acoustics/python-acoustics/tree/master/acoustics
    """
    rng = np.random.RandomState() if state is None else state
    has_extra = N % 2
    n_freqs = N // 2 + 1 + has_extra
    # Complex white spectrum, then shape it by 1/sqrt(f).
    spectrum = rng.randn(n_freqs) + 1j * rng.randn(n_freqs)
    shaping = np.sqrt(np.arange(n_freqs) + 1.0)  # +1 avoids divide by zero
    samples = (np.fft.irfft(spectrum / shaping)).real
    if has_extra:
        samples = samples[:-1]
    return normalize(samples)


def white(N, state=None):
    """
    White noise.

    :param N: Amount of samples.
    :param state: State of PRNG.
    :type state: :class:`np.random.RandomState`

    White noise has a constant power density (flat narrowband
    spectrum): +3 dB of power per octave.
    """
    rng = np.random.RandomState() if state is None else state
    return rng.randn(N)


def brown(N, state=None):
    """
    Brown noise.

    :param N: Amount of samples.
    :param state: State of PRNG.
    :type state: :class:`np.random.RandomState`

    Power falls off at 3 dB per octave; power density at 6 dB per
    octave (spectrum scaled by 1/f).
    """
    rng = np.random.RandomState() if state is None else state
    has_extra = N % 2
    n_freqs = N // 2 + 1 + has_extra
    spectrum = rng.randn(n_freqs) + 1j * rng.randn(n_freqs)
    shaping = np.arange(n_freqs) + 1
    samples = np.fft.irfft(spectrum / shaping).real
    if has_extra:
        samples = samples[:-1]
    return normalize(samples)


# Dispatch table mapping a noise-type name to its generator.
NOISE_FUNCTION = {
    "white": white,
    "pink": pink,
    "brown": brown,
}


def simulate_LFP(
    time,
    ripple_times,
    ripple_amplitude=2,
    ripple_duration=0.100,
    noise_type="brown",
    noise_amplitude=1.3,
):
    """Simulate a LFP with a ripple at each of `ripple_times`.

    Each ripple is a RIPPLE_FREQUENCY sinusoid windowed by a Gaussian
    centered on the ripple time, added on top of background noise of
    the requested type."""
    noise = (noise_amplitude / 2) * NOISE_FUNCTION[noise_type](time.size)
    ripple_signal = np.sin(2 * np.pi * time * RIPPLE_FREQUENCY)

    # Accept a scalar ripple time as well as an iterable of them.
    try:
        iter(ripple_times)
    except TypeError:
        ripple_times = [ripple_times]

    components = []
    for center in ripple_times:
        envelope = norm(loc=center, scale=ripple_duration / 6).pdf(time)
        envelope /= envelope.max()
        components.append((ripple_amplitude / 2) * (ripple_signal * envelope))

    return np.sum(components, axis=0) + noise
/ripple_detection-1.4.0-py3-none-any.whl/ripple_detection/simulate.py
0.919685
0.785966
simulate.py
pypi
from itertools import chain import numpy as np import pandas as pd from scipy.stats import zscore from ripple_detection.core import ( exclude_close_events, exclude_movement, gaussian_smooth, get_envelope, get_multiunit_population_firing_rate, merge_overlapping_ranges, threshold_by_zscore, ) def get_Kay_ripple_consensus_trace( ripple_filtered_lfps, sampling_frequency, smoothing_sigma=0.004 ): ripple_consensus_trace = np.full_like(ripple_filtered_lfps, np.nan) not_null = np.all(pd.notnull(ripple_filtered_lfps), axis=1) ripple_consensus_trace[not_null] = get_envelope( np.asarray(ripple_filtered_lfps)[not_null] ) ripple_consensus_trace = np.sum(ripple_consensus_trace**2, axis=1) ripple_consensus_trace[not_null] = gaussian_smooth( ripple_consensus_trace[not_null], smoothing_sigma, sampling_frequency ) return np.sqrt(ripple_consensus_trace) def Kay_ripple_detector( time, filtered_lfps, speed, sampling_frequency, speed_threshold=4.0, minimum_duration=0.015, zscore_threshold=2.0, smoothing_sigma=0.004, close_ripple_threshold=0.0, ): """Find start and end times of sharp wave ripple events (150-250 Hz) based on Kay et al. 2016 [1]. Parameters ---------- time : array_like, shape (n_time,) filtered_lfps : array_like, shape (n_time, n_signals) Bandpass filtered time series of electric potentials in the ripple band speed : array_like, shape (n_time,) Running speed of animal sampling_frequency : float Number of samples per second. speed_threshold : float, optional Maximum running speed of animal for a ripple minimum_duration : float, optional Minimum time the z-score has to stay above threshold to be considered a ripple. The default is given assuming time is in units of seconds. zscore_threshold : float, optional Number of standard deviations the ripple power must exceed to be considered a ripple smoothing_sigma : float, optional Amount to smooth the time series over time. The default is given assuming time is in units of seconds. 
close_ripple_threshold : float, optional Exclude ripples that occur within `close_ripple_threshold` of a previously detected ripple. Returns ------- ripple_times : pandas DataFrame References ---------- .. [1] Kay, K., Sosa, M., Chung, J.E., Karlsson, M.P., Larkin, M.C., and Frank, L.M. (2016). A hippocampal network for spatial coding during immobility and sleep. Nature 531, 185-190. """ filtered_lfps = np.asarray(filtered_lfps) speed = np.asarray(speed) time = np.asarray(time) not_null = np.all(pd.notnull(filtered_lfps), axis=1) & pd.notnull(speed) filtered_lfps, speed, time = ( filtered_lfps[not_null], speed[not_null], time[not_null], ) combined_filtered_lfps = get_Kay_ripple_consensus_trace( filtered_lfps, sampling_frequency, smoothing_sigma=smoothing_sigma ) combined_filtered_lfps = zscore(combined_filtered_lfps, nan_policy="omit") candidate_ripple_times = threshold_by_zscore( combined_filtered_lfps, time, minimum_duration, zscore_threshold ) ripple_times = exclude_movement( candidate_ripple_times, speed, time, speed_threshold=speed_threshold ) ripple_times = exclude_close_events(ripple_times, close_ripple_threshold) return _get_event_stats(ripple_times, time, combined_filtered_lfps, speed) def Karlsson_ripple_detector( time, filtered_lfps, speed, sampling_frequency, speed_threshold=4.0, minimum_duration=0.015, zscore_threshold=3.0, smoothing_sigma=0.004, close_ripple_threshold=0.0, ): """Find start and end times of sharp wave ripple events (150-250 Hz) based on Karlsson et al. 2009 [1]. Parameters ---------- time : array_like, shpe (n_time,) filtered_lfps : array_like, shape (n_time, n_signals) Bandpass filtered time series of electric potentials in the ripple band speed : array_like, shape (n_time,) Running speed of animal sampling_frequency : float Number of samples per second. 
speed_threshold : float, optional Maximum running speed of animal for a ripple minimum_duration : float, optional Minimum time the z-score has to stay above threshold to be considered a ripple. The default is given assuming time is in units of seconds. zscore_threshold : float, optional Number of standard deviations the ripple power must exceed to be considered a ripple smoothing_sigma : float, optional Amount to smooth the time series over time. The default is given assuming time is in units of seconds. close_ripple_threshold : float, optional Exclude ripples that occur within `close_ripple_threshold` of a previously detected ripple. Returns ------- ripple_times : pandas DataFrame References ---------- .. [1] Karlsson, M.P., and Frank, L.M. (2009). Awake replay of remote experiences in the hippocampus. Nature Neuroscience 12, 913-918. """ filtered_lfps = np.asarray(filtered_lfps) speed = np.asarray(speed) time = np.asarray(time) not_null = np.all(pd.notnull(filtered_lfps), axis=1) & pd.notnull(speed) filtered_lfps, speed, time = ( filtered_lfps[not_null], speed[not_null], time[not_null], ) filtered_lfps = get_envelope(filtered_lfps) filtered_lfps = gaussian_smooth( filtered_lfps, sigma=smoothing_sigma, sampling_frequency=sampling_frequency ) filtered_lfps = zscore(filtered_lfps, nan_policy="omit") candidate_ripple_times = [ threshold_by_zscore(filtered_lfp, time, minimum_duration, zscore_threshold) for filtered_lfp in filtered_lfps.T ] candidate_ripple_times = list( merge_overlapping_ranges(chain.from_iterable(candidate_ripple_times)) ) ripple_times = exclude_movement( candidate_ripple_times, speed, time, speed_threshold=speed_threshold ) ripple_times = exclude_close_events(ripple_times, close_ripple_threshold) return _get_event_stats(ripple_times, time, filtered_lfps.mean(axis=1), speed) def Roumis_ripple_detector( time, filtered_lfps, speed, sampling_frequency, speed_threshold=4.0, minimum_duration=0.015, zscore_threshold=2.0, smoothing_sigma=0.004, 
close_ripple_threshold=0.0, ): """Find start and end times of sharp wave ripple events (150-250 Hz) based on [1]. Parameters ---------- time : array_like, shpe (n_time,) filtered_lfps : array_like, shape (n_time, n_signals) Bandpass filtered time series of electric potentials in the ripple band speed : array_like, shape (n_time,) Running speed of animal sampling_frequency : float Number of samples per second. speed_threshold : float, optional Maximum running speed of animal for a ripple minimum_duration : float, optional Minimum time the z-score has to stay above threshold to be considered a ripple. The default is given assuming time is in units of seconds. zscore_threshold : float, optional Number of standard deviations the ripple power must exceed to be considered a ripple smoothing_sigma : float, optional Amount to smooth the time series over time. The default is given assuming time is in units of seconds. close_ripple_threshold : float, optional Exclude ripples that occur within `close_ripple_threshold` of a previously detected ripple. 
Returns ------- ripple_times : pandas DataFrame """ filtered_lfps = np.asarray(filtered_lfps) speed = np.asarray(speed) time = np.asarray(time) not_null = np.all(pd.notnull(filtered_lfps), axis=1) & pd.notnull(speed) filtered_lfps, speed, time = ( filtered_lfps[not_null], speed[not_null], time[not_null], ) filtered_lfps = get_envelope(filtered_lfps) ** 2 filtered_lfps = gaussian_smooth( filtered_lfps, sigma=smoothing_sigma, sampling_frequency=sampling_frequency ) combined_filtered_lfps = np.mean(np.sqrt(filtered_lfps), axis=1) combined_filtered_lfps = zscore(combined_filtered_lfps, nan_policy="omit") candidate_ripple_times = threshold_by_zscore( combined_filtered_lfps, time, minimum_duration, zscore_threshold ) ripple_times = exclude_movement( candidate_ripple_times, speed, time, speed_threshold=speed_threshold ) ripple_times = exclude_close_events(ripple_times, close_ripple_threshold) index = pd.Index(np.arange(len(ripple_times)) + 1, name="ripple_number") return pd.DataFrame(ripple_times, columns=["start_time", "end_time"], index=index) def multiunit_HSE_detector( time, multiunit, speed, sampling_frequency, speed_threshold=4.0, minimum_duration=0.015, zscore_threshold=2.0, smoothing_sigma=0.015, close_event_threshold=0.0, use_speed_threshold_for_zscore=False, ): """Multiunit High Synchrony Event detector. Finds times when the multiunit population spiking activity is high relative to the average. Parameters ---------- time : ndarray, shape (n_time,) multiunit : ndarray, shape (n_time, n_signals) Binary array of multiunit spike times. speed : ndarray, shape (n_time,) Running speed of animal sampling_frequency : float Number of samples per second. speed_threshold : float Maximum running speed of animal to be counted as an event minimum_duration : float Minimum time the z-score has to stay above threshold to be considered an event. 
zscore_threshold : float Number of standard deviations the multiunit population firing rate must exceed to be considered an event smoothing_sigma : float or np.timedelta Amount to smooth the firing rate over time. The default is given assuming time is in units of seconds. close_event_threshold : float Exclude events that occur within `close_event_threshold` of a previously detected event. use_speed_threshold_for_zscore : bool Use speed thresholded multiunit for mean and std for z-score calculation Returns ------- high_synchrony_event_times : pandas.DataFrame, shape (n_events, 2) References ---------- .. [1] Davidson, T.J., Kloosterman, F., and Wilson, M.A. (2009). Hippocampal Replay of Extended Experience. Neuron 63, 497–507. """ multiunit = np.asarray(multiunit) speed = np.asarray(speed) time = np.asarray(time) firing_rate = get_multiunit_population_firing_rate( multiunit, sampling_frequency, smoothing_sigma ) if use_speed_threshold_for_zscore: mean = np.nanmean(firing_rate[speed < speed_threshold]) std = np.nanstd(firing_rate[speed < speed_threshold]) else: mean = np.nanmean(firing_rate) std = np.nanstd(firing_rate) firing_rate = (firing_rate - mean) / std candidate_high_synchrony_events = threshold_by_zscore( firing_rate, time, minimum_duration, zscore_threshold ) high_synchrony_events = exclude_movement( candidate_high_synchrony_events, speed, time, speed_threshold=speed_threshold ) high_synchrony_events = exclude_close_events( high_synchrony_events, close_event_threshold ) return _get_event_stats(high_synchrony_events, time, firing_rate, speed) def _get_event_stats(event_times, time, zscore_metric, speed): index = pd.Index(np.arange(len(event_times)) + 1, name="event_number") try: speed_at_start = speed[np.in1d(time, event_times[:, 0])] speed_at_end = speed[np.in1d(time, event_times[:, 1])] except (IndexError, TypeError): speed_at_start = np.full_like(event_times, np.nan) speed_at_end = np.full_like(event_times, np.nan) mean_zscore = [] median_zscore = [] 
max_zscore = [] min_zscore = [] duration = [] max_speed = [] min_speed = [] median_speed = [] mean_speed = [] for start_time, end_time in event_times: ind = np.logical_and(time >= start_time, time <= end_time) event_zscore = zscore_metric[ind] mean_zscore.append(np.mean(event_zscore)) median_zscore.append(np.median(event_zscore)) max_zscore.append(np.max(event_zscore)) min_zscore.append(np.min(event_zscore)) duration.append(end_time - start_time) max_speed.append(np.max(speed[ind])) min_speed.append(np.min(speed[ind])) median_speed.append(np.median(speed[ind])) mean_speed.append(np.mean(speed[ind])) try: event_start_times = event_times[:, 0] event_end_times = event_times[:, 1] except TypeError: event_start_times = [] event_end_times = [] return pd.DataFrame( { "start_time": event_start_times, "end_time": event_end_times, "duration": duration, "mean_zscore": mean_zscore, "median_zscore": median_zscore, "max_zscore": max_zscore, "min_zscore": min_zscore, "speed_at_start": speed_at_start, "speed_at_end": speed_at_end, "max_speed": max_speed, "min_speed": min_speed, "median_speed": median_speed, "mean_speed": mean_speed, }, index=index, )
/ripple_detection-1.4.0-py3-none-any.whl/ripple_detection/detectors.py
0.910162
0.445891
detectors.py
pypi
#!/usr/bin/env python import numpy as np import pandas as pd from ripple_detector_CNN.external.modified_ripple_detection.core import ( exclude_close_events, filter_band, gaussian_smooth, threshold_by_zscore, ) def define_ripple_candidates( time_x, lfp, samp_rate, lo_hz=150, hi_hz=250, minimum_duration=0.015, zscore_threshold=1.0, smoothing_sigma=0.004, close_ripple_threshold=0.0, only_calc_filted_magni=False, ): ## Checks signal shape if lfp.ndim == 1: lfp = lfp[:, np.newaxis] assert lfp.ndim == 2 ## Checks signal dtype if lfp.dtype == np.float16: lfp = lfp.astype(np.float32) ## Checks NaN not_null = np.all(pd.notnull(lfp), axis=1) lfp = lfp[not_null] if not only_calc_filted_magni: time_x = time_x[not_null] ## Band-pass filtering print("\nRipple Band {}-{} Hz\n".format(lo_hz, hi_hz)) filtered_lfps = np.stack( [filter_band(lfp, samp_rate, lo_hz=lo_hz, hi_hz=hi_hz) for lfp in lfp.T] ) ## Sum over electrodes combined_filtered_lfps = np.sum(filtered_lfps ** 2, axis=0) ## Gaussian filtering along with the time axis combined_filtered_lfps = gaussian_smooth( combined_filtered_lfps, smoothing_sigma, samp_rate ) combined_filtered_lfps = np.sqrt(combined_filtered_lfps) filted_magni = combined_filtered_lfps # alias if only_calc_filted_magni: filtered_lfps = None rip_sec = None else: candidate_ripple_times = threshold_by_zscore( combined_filtered_lfps, time_x, minimum_duration, zscore_threshold ) ripple_times = exclude_close_events( candidate_ripple_times, close_ripple_threshold ) index = pd.Index(np.arange(len(ripple_times)) + 1, name="ripple_number") rip_sec = pd.DataFrame( ripple_times, columns=["start_sec", "end_sec"], index=index ) filtered_lfps = filtered_lfps.squeeze() return filtered_lfps, filted_magni, rip_sec
/ripple_detector_CNN-0.1.5-py3-none-any.whl/ripple_detector_CNN/define_ripple_candidates.py
0.462716
0.325494
define_ripple_candidates.py
pypi
import torch.nn as nn import math import torch def init_act_layer(act_str="relu"): if act_str == "relu": act_layer = nn.ReLU() if act_str == "lrelu": act_layer = nn.LeakyReLU(0.1) return act_layer def calc_out_len(i, k, s, p, d=1): o = (i + 2 * p - k - (k - 1) * (d - 1)) / s + 1 return o def pad_len_for_the_same_length(i, k, s, d=1): p = ((i - 1) * s - i + k + (k - 1) * (d - 1)) / 2 return p def conv_k(in_chs, out_chs, k=1, s=1, p=1): """Build size k kernel's convolution layer with padding""" return nn.Conv1d(in_chs, out_chs, kernel_size=k, stride=s, padding=p, bias=False) class BasicBlock(nn.Module): """Basic Block using kernel sizes = (7,5,3) convolustion with padding""" expansion = 1 def __init__(self, in_chs, out_chs, activation_str="relu"): super(BasicBlock, self).__init__() self.in_chs = in_chs self.out_chs = out_chs if activation_str == "relu": activation = nn.ReLU() if activation_str == "lrelu": activation = nn.LeakyReLU(0.1) if activation_str == "mish": activation = Mish() if activation_str == "swish": activation = Swish() self.conv7 = conv_k(in_chs, out_chs, k=7, p=3) self.bn7 = nn.BatchNorm1d(out_chs) self.activation7 = activation self.conv5 = conv_k(out_chs, out_chs, k=5, p=2) self.bn5 = nn.BatchNorm1d(out_chs) self.activation5 = activation self.conv3 = conv_k(out_chs, out_chs, k=3, p=1) self.bn3 = nn.BatchNorm1d(out_chs) self.activation3 = activation self.expansion_conv = conv_k(in_chs, out_chs, k=1, p=0) self.bn = nn.BatchNorm1d(out_chs) self.activation = activation def forward(self, x): residual = x x = self.conv7(x) x = self.bn7(x) x = self.activation7(x) x = self.conv5(x) x = self.bn5(x) x = self.activation5(x) x = self.conv3(x) x = self.bn3(x) x = self.activation3(x) if self.in_chs != self.out_chs: residual = self.expansion_conv(residual) residual = self.bn(residual) x += residual x = self.activation(x) return x class SecondLevelBlock(nn.Module): """Second level block using two basic blocks. 
Two basic blocks are connected with channel wide residual connections. """ def __init__(self, n_filters, activation_str="relu"): super().__init__() self.n_filters = n_filters self.basicblock = BasicBlock( n_filters, n_filters, activation_str=activation_str ) def forward(self, x): residual = x x = self.basicblock(x) return x if __name__ == "__main__": bs, n_chs, seq_len = 16, 19, 1000 inp = torch.rand(bs, n_chs, seq_len).cuda() n_filters = 4 slb = SecondLevelBlock(n_chs).cuda() class SecondLevelBlock(nn.Module): """Second level block using two basic blocks. Two basic blocks are connected with channel wide residual connections. """ def __init__(self, n_filters, activation_str="relu"): super().__init__() self.n_filters = n_filters self.basicblock = BasicBlock( n_filters, n_filters, activation_str=activation_str ) self.fc = nn.Linear(n_filters, 1) self.dropout_layer = nn.Dropout(0.5) # self.avg_pooling_layer = nn.AdaptiveAvgPool1d(1) def forward(self, x): # ch_mean_residual = self.avg_pooling_layer(x.transpose(-2, -1)).transpose(-2, -1) ch_weighted_mean_residual = self.fc(x.transpose(-2, -1)).transpose(-2, -1) x = self.basicblock(x) x += self.dropout_layer(ch_weighted_mean_residual) return x if __name__ == "__main__": bs, n_chs, seq_len = 16, 19, 1000 inp = torch.rand(bs, n_chs, seq_len).cuda() n_filters = 4 slb = SecondLevelBlock(n_chs).cuda() out = slb(inp)
/ripple_detector_CNN-0.1.5-py3-none-any.whl/ripple_detector_CNN/ResNet1D/modules.py
0.940099
0.458409
modules.py
pypi
from os.path import abspath, dirname, join import numpy as np import pandas as pd from scipy.fftpack import next_fast_len from scipy.io import loadmat from scipy.ndimage.filters import gaussian_filter1d from scipy.signal import filtfilt, hilbert, remez from scipy.stats import zscore from numba import jit def filter_band(data, sampling_frequency=1000, lo_hz=150, hi_hz=250): """Returns a bandpass filtered signal between [lo_hz, hi_hz] Parameters ---------- data : array_like, shape (n_time,) Returns ------- filtered_data : array_like, shape (n_time,) """ filter_numerator, filter_denominator = _mk_bandpass_filter( sampling_frequency, lo_hz=lo_hz, hi_hz=hi_hz ) is_nan = np.isnan(data) filtered_data = np.full_like(data, np.nan) filtered_data[~is_nan] = filtfilt( filter_numerator, filter_denominator, data[~is_nan], axis=0 ) return filtered_data def _mk_bandpass_filter(samp_rate, lo_hz=150, hi_hz=250, order=100): # Calculate the filter-coefficients for the finite impulse response (FIR) filter # whose transfer function minimizes the maximum error between the desired gain and # the realized gain in the specified frequency bands using the Remez exchange algorithm. num_taps = order + 1 nyq = 0.5 * samp_rate TRANSITION_BAND = 25 bands = [0, lo_hz - TRANSITION_BAND, lo_hz, hi_hz, hi_hz + TRANSITION_BAND, nyq] # A sequence half the size of bands containing the desired gain in each of the specified bands. desired = [0, 1, 0] ## Calculate the minimax optimal filter using the Remez exchange algorithm. minimax_optimal_filter = remez(num_taps, bands, desired, Hz=samp_rate) return minimax_optimal_filter, 1.0 def _get_series_start_end_times(series): """Extracts the start and end times of segements defined by a boolean pandas Series. Parameters ---------- series : pandas boolean Series (n_time,) Consecutive Trues define each segement. Returns ------- start_times : ndarray, shape (n_segments,) Beginning time of each segment based on the index of the series. 
end_times : ndarray, shape (n_segments,) End time of each segment based on the index of the series. """ is_start_time = (~series.shift(1).fillna(False)) & series start_times = series.index[is_start_time].to_numpy() # get_values() is_end_time = series & (~series.shift(-1).fillna(False)) end_times = series.index[is_end_time].to_numpy() # .get_values() return start_times, end_times def _segment_boolean_series(series, minimum_duration=0.015): """Returns a list of tuples where each tuple contains the start time of segement and end time of segment. It takes a boolean pandas series as input where the index is time. Parameters ---------- series : pandas boolean Series (n_time,) Consecutive Trues define each segement. minimum_duration : float, optional Segments must be at least this duration to be included. Returns ------- segments : list of 2-element tuples """ start_times, end_times = _get_series_start_end_times(series) return [ (start_time, end_time) for start_time, end_time in zip(start_times, end_times) if end_time >= (start_time + minimum_duration) ] def _get_ripplefilter_kernel(): """Returns the pre-computed ripple filter kernel from the Frank lab. The kernel is 150-250 Hz bandpass with 40 db roll off and 10 Hz sidebands. """ filter_file = join(abspath(dirname(__file__)), "ripplefilter.mat") ripplefilter = loadmat(filter_file) return ripplefilter["ripplefilter"]["kernel"][0][0].flatten(), 1 def extend_threshold_to_mean( is_above_mean, is_above_threshold, time, minimum_duration=0.015 ): """Extract segments above threshold if they remain above the threshold for a minimum amount of time and extend them to the mean. Parameters ---------- is_above_mean : ndarray, shape (n_time,) Time series indicator function specifying when the time series is above the mean is_above_threshold : ndarray, shape (n_time,) Time series indicator function specifying when the time series is above the the threshold. 
time : ndarray, shape (n_time,) Returns ------- candidate_ripple_times : list of 2-element tuples Each tuple is the start and end time of the candidate ripple. """ is_above_threshold = pd.Series(is_above_threshold, index=time) is_above_mean = pd.Series(is_above_mean, index=time) above_mean_segments = _segment_boolean_series( is_above_mean, minimum_duration=minimum_duration ) above_threshold_segments = _segment_boolean_series( is_above_threshold, minimum_duration=minimum_duration ) return sorted(_extend_segment(above_threshold_segments, above_mean_segments)) def exclude_movement(candidate_ripple_times, speed, time, speed_threshold=4.0): """Removes candidate ripples if the animal is moving. Parameters ---------- candidate_ripple_times : array_like, shape (n_ripples, 2) speed : ndarray, shape (n_time,) Speed of animal during recording session. time : ndarray, shape (n_time,) Time in recording session. speed_threshold : float, optional Maximum speed for animal to be considered to be moving. Returns ------- ripple_times : ndarray, shape (n_ripples, 2) Ripple times where the animal is not moving. """ candidate_ripple_times = np.array(candidate_ripple_times) try: speed_at_ripple_start = speed[np.in1d(time, candidate_ripple_times[:, 0])] speed_at_ripple_end = speed[np.in1d(time, candidate_ripple_times[:, 1])] is_below_speed_threshold = (speed_at_ripple_start <= speed_threshold) & ( speed_at_ripple_end <= speed_threshold ) return candidate_ripple_times[is_below_speed_threshold] except IndexError: return [] def _find_containing_interval(interval_candidates, target_interval): """Returns the interval that contains the target interval out of a list of interval candidates. 
This is accomplished by finding the closest start time out of the candidate intervals, since we already know that one interval candidate contains the target interval (the segements above 0 contain the segments above the threshold) """ candidate_start_times = np.asarray(interval_candidates)[:, 0] zero = np.array(0).astype(candidate_start_times.dtype) closest_start_ind = np.max( (candidate_start_times - target_interval[0] <= zero).nonzero() ) return interval_candidates[closest_start_ind] def _extend_segment(segments_to_extend, containing_segments): """Extends the boundaries of a segment if it is a subset of one of the containing segments. Parameters ---------- segments_to_extend : list of 2-element tuples Elements are the start and end times containing_segments : list of 2-element tuples Elements are the start and end times Returns ------- extended_segments : list of 2-element tuples """ segments = [ _find_containing_interval(containing_segments, segment) for segment in segments_to_extend ] return list(set(segments)) # remove duplicate segments def get_envelope(data, axis=0): """Extracts the instantaneous amplitude (envelope) of an analytic signal using the Hilbert transform""" n_samples = data.shape[axis] instantaneous_amplitude = np.abs( hilbert(data, N=next_fast_len(n_samples), axis=axis) ) return np.take(instantaneous_amplitude, np.arange(n_samples), axis=axis) def gaussian_smooth(data, sigma, sampling_frequency, axis=0, truncate=8): """1D convolution of the data with a Gaussian. The standard deviation of the gaussian is in the units of the sampling frequency. 
The function is just a wrapper around scipy's `gaussian_filter1d`, The support is truncated at 8 by default, instead of 4 in `gaussian_filter1d` Parameters ---------- data : array_like sigma : float sampling_frequency : int axis : int, optional truncate : int, optional Returns ------- smoothed_data : array_like """ return gaussian_filter1d( data, sigma * sampling_frequency, truncate=truncate, axis=axis, mode="constant" ) def threshold_by_zscore(data, time, minimum_duration=0.015, zscore_threshold=2): """Standardize the data and determine whether it is above a given number. Parameters ---------- data : array_like, shape (n_time,) zscore_threshold : int, optional Returns ------- candidate_ripple_times : pandas Dataframe """ zscored_data = zscore(data) is_above_mean = zscored_data >= 0 is_above_threshold = zscored_data >= zscore_threshold return extend_threshold_to_mean( is_above_mean, is_above_threshold, time, minimum_duration=minimum_duration ) def merge_overlapping_ranges(ranges): """Merge overlapping and adjacent ranges Parameters ---------- ranges : iterable with 2-elements Element 1 is the start of the range. Element 2 is the end of the range. Yields ------- sorted_merged_range : 2-element tuple Element 1 is the start of the merged range. Element 2 is the end of the merged range. >>> list(_merge_overlapping_ranges([(5,7), (3,5), (-1,3)])) [(-1, 7)] >>> list(_merge_overlapping_ranges([(5,6), (3,4), (1,2)])) [(1, 2), (3, 4), (5, 6)] >>> list(_merge_overlapping_ranges([])) [] References ---------- .. [1] http://codereview.stackexchange.com/questions/21307/consolidate- list-of-ranges-that-overlap """ ranges = iter(sorted(ranges)) current_start, current_stop = next(ranges) for start, stop in ranges: if start > current_stop: # Gap between segments: output current segment and start a new # one. yield current_start, current_stop current_start, current_stop = start, stop else: # Segments adjacent or overlapping: merge. 
current_stop = max(current_stop, stop) yield current_start, current_stop def exclude_close_events(candidate_event_times, close_event_threshold=1.0): """Excludes successive events that occur within a `close_event_threshold` of a previously occuring event. Parameters ---------- candidate_event_times : ndarray or list, shape (n_events, 2) Start and end times of possible events close_event_threshold : float or np.timedelta Returns ------- candidate_event_times : ndarray, shape (n_events - too_close_events, 2) """ candidate_event_times = np.array(candidate_event_times) n_events = candidate_event_times.shape[0] new_event_index = np.arange(n_events) new_event_times = candidate_event_times.copy() for ind, (start_time, end_time) in enumerate(candidate_event_times): if np.isin(ind, new_event_index): is_too_close = ( end_time + close_event_threshold > new_event_times[:, 0] ) & (new_event_index > ind) new_event_index = new_event_index[~is_too_close] new_event_times = new_event_times[~is_too_close] return new_event_times if new_event_times.size > 0 else [] def get_multiunit_population_firing_rate( multiunit, sampling_frequency, smoothing_sigma=0.015 ): """Calculates the multiunit population firing rate. Parameters ---------- multiunit : ndarray, shape (n_time, n_signals) Binary array of multiunit spike times. sampling_frequency : float Number of samples per second. smoothing_sigma : float or np.timedelta Amount to smooth the firing rate over time. The default is given assuming time is in units of seconds. Returns ------- multiunit_population_firing_rate : ndarray, shape (n_time,) """ return gaussian_smooth( multiunit.mean(axis=1) * sampling_frequency, smoothing_sigma, sampling_frequency )
/ripple_detector_CNN-0.1.5-py3-none-any.whl/ripple_detector_CNN/external/modified_ripple_detection/core.py
0.858006
0.534066
core.py
pypi
import hashlib from binascii import hexlify from ecdsa import curves, SigningKey, six from ecdsa.util import sigencode_der from .serialize import ( to_bytes, from_bytes, RippleBaseDecoder, serialize_object, fmt_hex) __all__ = ('sign_transaction', 'signature_for_transaction') tfFullyCanonicalSig = 0x80000000 def sign_transaction(transaction, secret, flag_canonical=True): """High-level signing function.hexlify - Adds a signature (``TxnSignature``) field to the transaction object. - By default will set the ``FullyCanonicalSig`` flag to `` """ if flag_canonical: transaction['Flags'] = transaction.get('Flags', 0) | tfFullyCanonicalSig sig = signature_for_transaction(transaction, secret) transaction['TxnSignature'] = sig return transaction def signature_for_transaction(transaction, secret): """Calculate the fully-canonical signature of the transaction. Will set the ``SigningPubKey`` as appropriate before signing. ``transaction`` is a Python object. The result value is what you can insert into as ``TxSignature`` into the transaction structure you submit. """ seed = parse_seed(secret) key = root_key_from_seed(seed) # Apparently the pub key is required to be there. transaction['SigningPubKey'] = fmt_hex(ecc_point_to_bytes_compressed( key.privkey.public_key.point, pad=True)) # Convert the transaction to a binary representation signing_hash = create_signing_hash(transaction) # Create a hex-formatted signature. return fmt_hex(ecdsa_sign(key, signing_hash)) def parse_seed(secret): """Your Ripple secret is a seed from which the true private key can be derived. The ``Seed.parse_json()`` method of ripple-lib supports different ways of specifying the seed, including a 32-byte hex value. We just support the regular base-encoded secret format given to you by the client when creating an account. """ assert secret[0] == 's' return RippleBaseDecoder.decode(secret) def root_key_from_seed(seed): """This derives your master key the given seed. 
Implemented in ripple-lib as ``Seed.prototype.get_key``, and further is described here: https://ripple.com/wiki/Account_Family#Root_Key_.28GenerateRootDeterministicKey.29 """ seq = 0 while True: private_gen = from_bytes(first_half_of_sha512( b''.join([seed, to_bytes(seq, 4)]))) seq += 1 if curves.SECP256k1.order >= private_gen: break public_gen = curves.SECP256k1.generator * private_gen # Now that we have the private and public generators, we apparently # have to calculate a secret from them that can be used as a ECDSA # signing key. secret = i = 0 public_gen_compressed = ecc_point_to_bytes_compressed(public_gen) while True: secret = from_bytes(first_half_of_sha512( b"".join([ public_gen_compressed, to_bytes(0, 4), to_bytes(i, 4)]))) i += 1 if curves.SECP256k1.order >= secret: break secret = (secret + private_gen) % curves.SECP256k1.order # The ECDSA signing key object will, given this secret, then expose # the actual private and public key we are supposed to work with. key = SigningKey.from_secret_exponent(secret, curves.SECP256k1) # Attach the generators as supplemental data key.private_gen = private_gen key.public_gen = public_gen return key def ecdsa_sign(key, signing_hash, **kw): """Sign the given data. The key is the secret returned by :func:`root_key_from_seed`. The data will be a binary coded transaction. """ r, s = key.sign_number(int(signing_hash, 16), **kw) r, s = ecdsa_make_canonical(r, s) # Encode signature in DER format, as in # ``sjcl.ecc.ecdsa.secretKey.prototype.encodeDER`` der_coded = sigencode_der(r, s, None) return der_coded def ecdsa_make_canonical(r, s): """Make sure the ECDSA signature is the canonical one. 
https://github.com/ripple/ripple-lib/commit/9d6ccdcab1fc237dbcfae41fc9e0ca1d2b7565ca https://ripple.com/wiki/Transaction_Malleability """ # For a canonical signature we want the lower of two possible values for s # 0 < s <= n/2 N = curves.SECP256k1.order if not N / 2 >= s: s = N - s return r, s def get_ripple_from_pubkey(pubkey): """Given a public key, determine the Ripple address. """ ripemd160 = hashlib.new('ripemd160') ripemd160.update(hashlib.sha256(pubkey).digest()) return RippleBaseDecoder.encode(ripemd160.digest()) def get_ripple_from_secret(seed): """Another helper. Returns the first ripple address from the secret.""" key = root_key_from_seed(parse_seed(seed)) pubkey = ecc_point_to_bytes_compressed(key.privkey.public_key.point, pad=True) return get_ripple_from_pubkey(pubkey) # From ripple-lib:hashprefixes.js HASH_TX_ID = 0x54584E00; # 'TXN' HASH_TX_SIGN = 0x53545800 # 'STX' HASH_TX_SIGN_TESTNET = 0x73747800 # 'stx' def create_signing_hash(transaction, testnet=False): """This is the actual value to be signed. It consists of a prefix and the binary representation of the transaction. """ prefix = HASH_TX_SIGN_TESTNET if testnet else HASH_TX_SIGN return hash_transaction(transaction, prefix) def hash_transaction(transaction, prefix): """Create a hash of the transaction and the prefix. """ binary = first_half_of_sha512( to_bytes(prefix, 4) + serialize_object(transaction, hex=False)) return hexlify(binary).upper() def first_half_of_sha512(*bytes): """As per spec, this is the hashing function used.""" hash = hashlib.sha512() for part in bytes: hash.update(part) return hash.digest()[:256//8] def ecc_point_to_bytes_compressed(point, pad=False): """ In ripple-lib, implemented as a prototype extension ``sjcl.ecc.point.prototype.toBytesCompressed`` in ``sjcl-custom``. Also implemented as ``KeyPair.prototype._pub_bits``, though in that case it explicitly first pads the point to the bit length of the curve prime order value. 
""" header = b'\x02' if point.y() % 2 == 0 else b'\x03' bytes = to_bytes( point.x(), curves.SECP256k1.order.bit_length()//8 if pad else None) return b"".join([header, bytes]) class Test: def test_parse_seed(self): # To get the reference value in ripple-lib: # Seed.from_json(...)._value.toString() parsed = parse_seed('ssq55ueDob4yV3kPVnNQLHB6icwpC') assert from_bytes(parsed) == \ 109259249403722017025835552665225484154 def test_wiki_test_vector(self): # https://ripple.com/wiki/Account_Family#Test_Vectors seed = parse_seed('shHM53KPZ87Gwdqarm1bAmPeXg8Tn') assert fmt_hex(seed) == '71ED064155FFADFA38782C5E0158CB26' key = root_key_from_seed(seed) assert fmt_hex(to_bytes(key.private_gen)) == \ '7CFBA64F771E93E817E15039215430B53F7401C34931D111EAB3510B22DBB0D8' assert get_ripple_from_pubkey( ecc_point_to_bytes_compressed(key.privkey.public_key.point, pad=True)) == \ 'rhcfR9Cg98qCxHpCcPBmMonbDBXo84wyTn' def test_key_derivation(self): key = root_key_from_seed(parse_seed('ssq55ueDob4yV3kPVnNQLHB6icwpC')) # This ensures the key was properly initialized expected = '0x902981cd5e0c862c53dc4854b6da4cc04179a2a524912d79800ac4c95435564d' if not six.PY3: expected = expected + 'L' assert hex(key.privkey.secret_multiplier) == expected def test_ripple_from_secret(self): assert get_ripple_from_secret('shHM53KPZ87Gwdqarm1bAmPeXg8Tn') ==\ 'rhcfR9Cg98qCxHpCcPBmMonbDBXo84wyTn' def test_signing_hash(self): assert create_signing_hash({"TransactionType": "Payment"}) == \ b'903C926641095B392A123D4CCD19E060DD8A603C91DDFF254AC9AD3B986C10CF' def test_der_encoding(self): # This simply verifies that the DER encoder from the ECDSA lib # we're using does the right thing and matches the output of the # DER encoder of ripple-lib. 
assert hexlify(sigencode_der( int('ff89083ed4923b3379381826339c614ac1cb79bf36b18c34d5e97784c5a5a9db', 16), int('cc4355eda8ce79c629fb53b0d19abc1b543d9f174626cf33b8a26254c63b22b7', 16), None)) == \ b'3046022100ff89083ed4923b3379381826339c614ac1cb79bf36b18c34d5e97784c5a5a9db022100cc4355eda8ce79c629fb53b0d19abc1b543d9f174626cf33b8a26254c63b22b7' def test_canonical_signature(self): # From https://github.com/ripple/ripple-lib/blob/9d6ccdcab1fc237dbcfae41fc9e0ca1d2b7565ca/test/sjcl-ecdsa-canonical-test.js def parse_hex_sig(hexstring): l = len(hexstring) r = int(hexstring[:l//2], 16) s = int(hexstring[l//2:], 16) return r, s # Test a signature that will be canonicalized input = "27ce1b914045ba7e8c11a2f2882cb6e07a19d4017513f12e3e363d71dc3fff0fb0a0747ecc7b4ca46e45b3b32b6b2a066aa0249c027ef11e5bce93dab756549c" r, s = ecdsa_make_canonical(*parse_hex_sig(input)) assert (r, s) == parse_hex_sig('27ce1b914045ba7e8c11a2f2882cb6e07a19d4017513f12e3e363d71dc3fff0f4f5f8b813384b35b91ba4c4cd494d5f8500eb84aacc9af1d6403cab218dfeca5') # Test a signature that is already fully-canonical input = "5c32bc2b4d34e27af9fb66eeea0f47f6afb3d433658af0f649ebae7b872471ab7d23860688aaf9d8131f84cfffa6c56bf9c32fd8b315b2ef9d6bcb243f7a686c" r, s = ecdsa_make_canonical(*parse_hex_sig(input)) assert (r, s) == parse_hex_sig(input) def test_sign(self): # Verify a correct signature is created (uses a fixed k value): key = root_key_from_seed(parse_seed('ssq55ueDob4yV3kPVnNQLHB6icwpC')) assert hexlify(ecdsa_sign(key, 'FF00EECC', k=3)) == \ b'3045022100f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f902205f6d58be6182b9a1e04fcec36f75668deafad2e4336b48770ee5c559d3518301'
/ripple-python-0.2.11.tar.gz/ripple-python-0.2.11/ripple/sign.py
0.853333
0.459804
sign.py
pypi
import importlib.resources as pkg_resources from typing import Callable, Tuple import jax import jax.numpy as jnp import numpy as np from . import noise_resources from .typing import Array f_range_LIGOI = (40.0, 1e4) r""" LIGO-I frequency range [Hz]. References: `<https://arxiv.org/abs/gr-qc/0010009>`_ """ def Sn_LIGOI(f: Array) -> Array: r""" LIGO-I noise PSD. References: `<https://arxiv.org/abs/gr-qc/0010009>`_ Args: f: frequency Returns: The noise PSD. """ fs = 40 # Hz f_theta = 150 # Hz x = f / f_theta normalization = 1e-46 return jnp.where( f > fs, normalization * 9 * ((4.49 * x) ** (-56) + 0.16 * x ** (-4.52) + 0.52 + 0.32 * x**2), jnp.inf, ) def _load_noise( name: str, asd: bool = False ) -> Tuple[Callable[[Array], Array], Tuple[float, float]]: r""" Loads noise PSD from text data file into an interpolator. The file's columns must contain the frequencies and corresponding noise power spectral density or amplitude spectral density values. Args: name: name of data file in ``noise_resources`` without the `.dat` extension asd: ``True`` if the file contains ASD (ie, sqrt(PSD)) rather than PSD data Returns Interpolator for noise PSD returning ``inf`` above and below the frequency range in the data file """ path_context = pkg_resources.path(noise_resources, f"{name}.dat") with path_context as path: fs, Sns = np.loadtxt(path, unpack=True) if asd: Sns = Sns**2 Sns[Sns == 0.0] = np.inf fs = jnp.array(fs) Sns = jnp.array(Sns) f_range = (fs[0], fs[-1]) return ( jax.jit(lambda f: jnp.interp(f, fs, Sns, left=jnp.inf, right=jnp.inf)), f_range, ) Sn_aLIGO, f_range_aLIGO = _load_noise("aLIGO", asd=True) r"""The advanced LIGO noise PSD and frequency range. References: `<https://dcc.ligo.org/LIGO-T1800044/public>`_ """ Sn_ce, f_range_ce = _load_noise("ce", asd=True) r"""The Cosmic Explorer noise PSD and frequency range. 
References: `<https://dcc.ligo.org/LIGO-P1600143/public>`_ """ Sn_et, f_range_et = _load_noise("et", asd=True) r"""The Einstein Telescope noise PSD and frequency range. References: `<http://www.et-gw.eu/index.php/etsensitivities>`_ """ Sn_aLIGOZeroDetHighPower, f_range_aLIGOZeroDetHighPower = _load_noise( "aLIGOZeroDetHighPower" ) r"""The aLIGOZeroDetHighPower noise PSD from pycbc and frequency range. References: ??? Args: f: frequency Returns: The noise power spectral density """ Sn_O3a, f_range_O3a = _load_noise("O3a_Livingston_ASD", asd=True) r"""The LIGO O3a Livingston noise PSD and frequency range. References: ??? Args: f: frequency Returns: The noise power spectral density """ Sn_O2, f_range_O2 = _load_noise("O2_ASD", asd=True) r"""The LIGO O2 noise PSD and frequency range. References: `<https://github.com/jroulet/template_bank>`_ Args: f: frequency Returns: The noise power spectral density """
/ripplegw-0.0.4-py3-none-any.whl/ripple/noise.py
0.924364
0.569942
noise.py
pypi
from math import pi from typing import Callable, Optional, Tuple import warnings from jax import random import jax.numpy as jnp from .constants import C, G from .typing import Array, PRNGKeyArray def Mc_eta_to_ms(m): r""" Converts chirp mass and symmetric mass ratio to binary component masses. Args: m: the binary component masses ``(Mchirp, eta)`` Returns: :math:`(m1, m2)`, with the chirp mass in the same units as the component masses """ Mchirp, eta = m M = Mchirp / (eta ** (3 / 5)) m2 = (M - jnp.sqrt(M**2 - 4 * M**2 * eta)) / 2 m1 = M - m2 return m1, m2 def ms_to_Mc_eta(m): r""" Converts binary component masses to chirp mass and symmetric mass ratio. Args: m: the binary component masses ``(m1, m2)`` Returns: :math:`(\mathcal{M}, \eta)`, with the chirp mass in the same units as the component masses """ m1, m2 = m return (m1 * m2) ** (3 / 5) / (m1 + m2) ** (1 / 5), m1 * m2 / (m1 + m2) ** 2 def get_f_isco(m): r""" Computes the ISCO frequency for a black hole. Args: m: the black hole's mass in kg Returns: The ISCO frequency in Hz """ return 1 / (6 ** (3 / 2) * pi * m / (C**3 / G)) def get_M_eta_sampler( M_range: Tuple[float, float], eta_range: Tuple[float, float] ) -> Callable[[PRNGKeyArray, int], Array]: """ Uniformly values of the chirp mass and samples over the specified ranges. This function may be removed in the future since it is trivial. """ def sampler(key, n): M_eta = random.uniform( key, minval=jnp.array([M_range[0], eta_range[0]]), maxval=jnp.array([M_range[1], eta_range[1]]), shape=(n, 2), ) return M_eta return sampler def get_m1_m2_sampler( m1_range: Tuple[float, float], m2_range: Tuple[float, float] ) -> Callable[[PRNGKeyArray, int], Array]: r""" Creates a function to uniformly sample two parameters, with the restriction that the first is larger than the second. Note: While this function is particularly useful for sampling masses in a binary, nothing in it is specific to that context. 
Args: m1_range: the minimum and maximum values of the first parameter m2_range: the minimum and maximum values of the second parameter Returns: The sampling function """ def sampler(key, n): ms = random.uniform( key, minval=jnp.array([m1_range[0], m2_range[0]]), maxval=jnp.array([m1_range[1], m2_range[1]]), shape=(n, 2), ) return jnp.stack([ms.max(axis=1), ms.min(axis=1)]).T # type: ignore return sampler def get_eff_pads(fs: Array) -> Tuple[Array, Array]: r""" Gets arrays of zeros to pad a function evaluated on a frequency grid so the function values can be passed to ``jax.numpy.fft.ifft``. Args: fs: uniformly-spaced grid of frequencies. It is assumed that the first element in the grid must be an integer multiple of the grid spacing (i.e., ``fs[0] % df == 0``, where ``df`` is the grid spacing). Returns: The padding arrays of zeros. The first is of length ``fs[0] / df`` and the second is of length ``fs[-1] / df - 2``. """ df = (fs[-1] - fs[0]) / (len(fs) - 1) if not jnp.allclose(jnp.diff(fs), df).all(): warnings.warn("frequency grid may not be evenly spaced") if fs[0] % df != 0 or fs[-1] % df != 0: warnings.warn( "The first and/or last elements of the frequency grid are not integer " "multiples of the grid spacing. The frequency grid and pads from this " "function will thus yield inaccurate results when used with fft/ifft." ) N = 2 * jnp.array(fs[-1] / df - 1).astype(int) pad_low = jnp.zeros(jnp.array(fs[0] / df).astype(int)) pad_high = jnp.zeros(N - jnp.array(fs[-1] / df).astype(int)) return pad_low, pad_high # pad_low, pad_high, Sns, h1s, h2s def get_phase_maximized_inner_product_arr( del_t: Array, fs: Array, Sns: Array, h1s: Array, h2s: Array ) -> Array: r""" Calculates the inner product between two waveforms, maximized over the difference in phase at coalescence. This is just the absolute value of the noise-weighted inner product. 
Args: del_t: difference in the time at coalescence for the waveforms h1s: the first set of strains h2s: the second set of strains Sns: the noise power spectral densities fs: uniformly-spaced grid of frequencies used to perform the integration Returns: The noise-weighted inner product between the waveforms, maximized over the phase at coalescence """ # Normalize both waveforms. Factors of 4 and df drop out. norm1 = jnp.sqrt(jnp.sum(jnp.abs(h1s) ** 2 / Sns)) norm2 = jnp.sqrt(jnp.sum(jnp.abs(h2s) ** 2 / Sns)) # Compute unnormalized match, maximizing over phi_0 by taking the absolute value integral = jnp.abs( jnp.sum(h1s.conj() * h2s * jnp.exp(1j * 2 * pi * fs * del_t) / Sns) ) return integral / (norm1 * norm2) def get_phase_maximized_inner_product( del_t: Array, fs: Array, Sn: Callable[[Array], Array], theta1: Array, theta2: Array, amp1: Callable[[Array, Array], Array], Psi1: Callable[[Array, Array], Array], amp2: Optional[Callable[[Array, Array], Array]], Psi2: Optional[Callable[[Array, Array], Array]], ) -> Array: r""" Calculates the inner product between two waveforms, maximized over the difference in phase at coalescence. This is just the absolute value of the noise-weighted inner product. 
Args: theta1: parameters for the first waveform theta2: parameters for the second waveform del_t: difference in the time at coalescence for the waveforms amp1: amplitude function for first waveform Psi1: phase function for first waveform amp2: amplitude function for second waveform Psi2: phase function for second waveform fs: uniformly-spaced grid of frequencies used to perform the integration Sn: power spectral density of the detector noise Returns: The noise-weighted inner product between the waveforms, maximized over the phase at coalescence """ h1s = amp1(fs, theta1) * jnp.exp(1j * Psi1(fs, theta1)) if amp2 is None: amp2 = amp1 if Psi2 is None: Psi2 = Psi1 h2s = amp2(fs, theta2) * jnp.exp(1j * Psi2(fs, theta2)) Sns = Sn(fs) return get_phase_maximized_inner_product_arr(del_t, fs, Sns, h1s, h2s) def get_match_arr( pad_low: Array, pad_high: Array, Sns: Array, h1s: Array, h2s: Array ) -> Array: """ Calculates the match between two frequency-domain complex strains. The maximizations over the difference in time and phase at coalescence are performed by taking the absolute value of the inverse Fourier transform. Args: h1s: the first set of strains h2s: the second set of strains Sns: the noise power spectral densities pad_low: array of zeros to pad the left side of the integrand before it is passed to ``jax.numpy.fft.ifft`` pad_right: array of zeros to pad the right side of the integrand before it is passed to ``jax.numpy.fft.ifft`` Returns: The match. """ # Factors of 4 and df drop out due to linearity norm1 = jnp.sqrt(jnp.sum(jnp.abs(h1s) ** 2 / Sns)) norm2 = jnp.sqrt(jnp.sum(jnp.abs(h2s) ** 2 / Sns)) # Use IFFT trick to maximize over t_c. Ref: Maggiore's book, eq. 7.171. 
integrand_padded = jnp.concatenate((pad_low, h1s.conj() * h2s / Sns, pad_high)) return jnp.abs(len(integrand_padded) * jnp.fft.ifft(integrand_padded)).max() / ( norm1 * norm2 ) def get_match( fs: Array, pad_low: Array, pad_high: Array, Sn: Callable[[Array], Array], theta1: Array, theta2: Array, amp1: Callable[[Array, Array], Array], Psi1: Callable[[Array, Array], Array], amp2: Optional[Callable[[Array, Array], Array]], Psi2: Optional[Callable[[Array, Array], Array]], ) -> Array: r""" Calculates the match between two waveforms with different parameters and of distinct types. The match is defined as the noise-weighted inner product maximized over the difference in time and phase at coalescence. The maximizations are performed using the absolute value of the inverse Fourier transform trick. Args: theta1: parameters for the first waveform theta2: parameters for the second waveform amp1: amplitude function for first waveform Psi1: phase function for first waveform amp2: amplitude function for second waveform Psi2: phase function for second waveform fs: uniformly-spaced grid of frequencies used to perform the integration Sn: power spectral density of the detector noise pad_low: array of zeros to pad the left side of the integrand before it is passed to ``jax.numpy.fft.ifft`` pad_right: array of zeros to pad the right side of the integrand before it is passed to ``jax.numpy.fft.ifft`` Returns: The match :math:`m[\theta_1, \theta_2]` """ h1s = amp1(fs, theta1) * jnp.exp(1j * Psi1(fs, theta1)) if amp2 is None: amp2 = amp1 if Psi2 is None: Psi2 = Psi1 h2s = amp2(fs, theta2) * jnp.exp(1j * Psi2(fs, theta2)) Sns = Sn(fs) return get_match_arr(pad_low, pad_high, Sns, h1s, h2s)
/ripplegw-0.0.4-py3-none-any.whl/ripple/__init__.py
0.979121
0.719347
__init__.py
pypi
import jax import jax.numpy as jnp from .IMRPhenomD_utils import ( get_coeffs, get_delta0, get_delta1, get_delta2, get_delta3, get_delta4, get_transition_frequencies, ) from .IMRPhenomD_QNMdata import fM_CUT from ..constants import EulerGamma, gt, m_per_Mpc, C, PI from ..typing import Array from ripple import Mc_eta_to_ms def get_inspiral_phase(fM_s: Array, theta: Array, coeffs: Array) -> Array: """ Calculate the inspiral phase for the IMRPhenomD waveform. """ # First lets calculate some of the vairables that will be used below # Mass variables m1, m2, chi1, chi2 = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s eta = m1_s * m2_s / (M_s**2.0) # First lets construct the phase in the inspiral (region I) m1M = m1_s / M_s m2M = m2_s / M_s phi0 = 1.0 phi1 = 0.0 phi2 = 5.0 * (74.3 / 8.4 + 11.0 * eta) / 9.0 phi3 = -16.0 * PI + ( m1M * (25.0 + 38.0 / 3.0 * m1M) * chi1 + m2M * (25.0 + 38.0 / 3.0 * m2M) * chi2 ) phi4 = 5.0 * (3058.673 / 7.056 + 5429.0 / 7.0 * eta + 617.0 * eta * eta) / 72.0 phi4 += ( (247.0 / 4.8 * eta) * chi1 * chi2 + (-721.0 / 4.8 * eta) * chi1 * chi2 + ((-720.0 / 9.6 * m1M * m1M) + (1.0 / 9.6 * m1M * m1M)) * chi1 * chi1 + ((-720.0 / 9.6 * m2M * m2M) + (1.0 / 9.6 * m2M * m2M)) * chi2 * chi2 + ((240.0 / 9.6 * m1M * m1M) + (-7.0 / 9.6 * m1M * m1M)) * chi1 * chi1 + ((240.0 / 9.6 * m2M * m2M) + (-7.0 / 9.6 * m2M * m2M)) * chi2 * chi2 ) phi5 = 5.0 / 9.0 * (772.9 / 8.4 - 13.0 * eta) * PI phi5 += ( -m1M * ( 1391.5 / 8.4 - m1M * (1.0 - m1M) * 10.0 / 3.0 + m1M * (1276.0 / 8.1 + m1M * (1.0 - m1M) * 170.0 / 9.0) ) ) * chi1 + ( -m2M * ( 1391.5 / 8.4 - m2M * (1.0 - m2M) * 10.0 / 3.0 + m2M * (1276.0 / 8.1 + m2M * (1.0 - m2M) * 170.0 / 9.0) ) ) * chi2 phi5_log = (5.0 / 3.0) * (772.9 / 8.4 - 13.0 * eta) * PI phi5_log += 3.0 * ( ( -m1M * ( 1391.5 / 8.4 - m1M * (1.0 - m1M) * 10.0 / 3.0 + m1M * (1276.0 / 8.1 + m1M * (1.0 - m1M) * 170.0 / 9.0) ) ) * chi1 + ( -m2M * ( 1391.5 / 8.4 - m2M * (1.0 - m2M) * 10.0 / 3.0 + m2M * (1276.0 / 8.1 + m2M * (1.0 - m2M) * 170.0 / 9.0) ) 
) * chi2 ) phi6 = ( ( 11583.231236531 / 4.694215680 - 640.0 / 3.0 * PI * PI - 684.8 / 2.1 * EulerGamma ) + eta * (-15737.765635 / 3.048192 + 225.5 / 1.2 * PI * PI) + eta * eta * 76.055 / 1.728 - eta * eta * eta * 127.825 / 1.296 + (-684.8 / 2.1) * jnp.log(4.0) ) phi6 += (PI * m1M * (1490.0 / 3.0 + m1M * 260.0)) * chi1 + ( PI * m2M * (1490.0 / 3.0 + m2M * 260.0) ) * chi2 phi6_log = -684.8 / 2.1 phi7 = PI * ( 770.96675 / 2.54016 + 378.515 / 1.512 * eta - 740.45 / 7.56 * eta * eta ) phi7 += ( m1M * ( -17097.8035 / 4.8384 + eta * 28764.25 / 6.72 + eta * eta * 47.35 / 1.44 + m1M * ( -7189.233785 / 1.524096 + eta * 458.555 / 3.024 - eta * eta * 534.5 / 7.2 ) ) ) * chi1 + ( m2M * ( -17097.8035 / 4.8384 + eta * 28764.25 / 6.72 + eta * eta * 47.35 / 1.44 + m2M * ( -7189.233785 / 1.524096 + eta * 458.555 / 3.024 - eta * eta * 534.5 / 7.2 ) ) ) * chi2 # Add frequency dependence here v = (PI * fM_s) ** (1.0 / 3.0) phi_TF2 = ( phi0 * ((PI * fM_s) ** -(5.0 / 3.0)) + phi1 * ((PI * fM_s) ** -(4.0 / 3.0)) + phi2 * ((PI * fM_s) ** -1.0) + phi3 * ((PI * fM_s) ** -(2.0 / 3.0)) + phi4 * ((PI * fM_s) ** -(1.0 / 3.0)) + phi5_log * jnp.log(v) + phi5 + phi6_log * jnp.log(v) * ((PI * fM_s) ** (1.0 / 3.0)) + phi6 * ((PI * fM_s) ** (1.0 / 3.0)) + phi7 * ((PI * fM_s) ** (2.0 / 3.0)) ) * (3.0 / (128.0 * eta)) - PI/4.0 phi_Ins = ( phi_TF2 + ( coeffs[7] * fM_s + (3.0 / 4.0) * coeffs[8] * (fM_s ** (4.0 / 3.0)) + (3.0 / 5.0) * coeffs[9] * (fM_s ** (5.0 / 3.0)) + (1.0 / 2.0) * coeffs[10] * (fM_s**2.0) ) / eta ) return phi_Ins def get_IIa_raw_phase(fM_s: Array, theta: Array, coeffs: Array) -> Array: m1, m2, _, _ = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s eta = m1_s * m2_s / (M_s**2.0) phi_IIa_raw = ( coeffs[11] * fM_s + coeffs[12] * jnp.log(fM_s) - coeffs[13] * (fM_s**-3.0) / 3.0 ) / eta return phi_IIa_raw def get_IIb_raw_phase(fM_s: Array, theta: Array, coeffs: Array, f_RD, f_damp) -> Array: m1, m2, _, _ = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s eta = m1_s * m2_s / 
(M_s**2.0) f_RDM_s = f_RD * M_s f_dampM_s = f_damp * M_s phi_IIb_raw = ( coeffs[14] * fM_s - coeffs[15] * (fM_s**-1.0) + 4.0 * coeffs[16] * (fM_s ** (3.0 / 4.0)) / 3.0 + coeffs[17] * jnp.arctan((fM_s - coeffs[18] * f_RDM_s) / f_dampM_s) ) / eta return phi_IIb_raw def get_Amp0(fM_s: Array, eta: float) -> Array: Amp0 = ( (2.0 / 3.0 * eta) ** (1.0 / 2.0) * (fM_s) ** (-7.0 / 6.0) * PI ** (-1.0 / 6.0) ) return Amp0 def get_inspiral_Amp(fM_s: Array, theta: Array, coeffs: Array) -> Array: # Below is taken from https://git.ligo.org/lscsoft/lalsuite/-/blob/master/lalsimulation/lib/LALSimIMRPhenomD_internals.c # Lines 302 --> 351 m1, m2, chi1, chi2 = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s eta = m1_s * m2_s / (M_s**2.0) eta2 = eta * eta eta3 = eta * eta2 Seta = jnp.sqrt(1.0 - 4.0 * eta) SetaPlus1 = 1.0 + Seta # Spin variables chi12 = chi1 * chi1 chi22 = chi2 * chi2 # First lets construct the Amplitude in the inspiral (region I) A0 = 1.0 A2 = ((-969.0 + 1804.0 * eta) * PI ** (2.0 / 3.0)) / 672.0 A3 = ( ( chi1 * (81.0 * SetaPlus1 - 44.0 * eta) + chi2 * (81.0 - 81.0 * Seta - 44.0 * eta) ) * PI ) / 48.0 A4 = ( ( -27312085.0 - 10287648.0 * chi22 - 10287648.0 * chi12 * SetaPlus1 + 10287648.0 * chi22 * Seta + 24.0 * ( -1975055.0 + 857304.0 * chi12 - 994896.0 * chi1 * chi2 + 857304.0 * chi22 ) * eta + 35371056.0 * eta2 ) * (PI ** (4.0 / 3.0)) ) / 8.128512e6 A5 = ( (PI ** (5.0 / 3.0)) * ( chi2 * ( -285197.0 * (-1 + Seta) + 4 * (-91902.0 + 1579.0 * Seta) * eta - 35632.0 * eta2 ) + chi1 * ( 285197.0 * SetaPlus1 - 4.0 * (91902.0 + 1579.0 * Seta) * eta - 35632.0 * eta2 ) + 42840.0 * (-1.0 + 4.0 * eta) * PI ) ) / 32256.0 A6 = ( -( (PI**2.0) * ( -336.0 * ( -3248849057.0 + 2943675504.0 * chi12 - 3339284256.0 * chi1 * chi2 + 2943675504.0 * chi22 ) * eta2 - 324322727232.0 * eta3 - 7.0 * ( -177520268561.0 + 107414046432.0 * chi22 + 107414046432.0 * chi12 * SetaPlus1 - 107414046432.0 * chi22 * Seta + 11087290368.0 * (chi1 + chi2 + chi1 * Seta - chi2 * Seta) * PI ) + 12.0 * eta * ( 
-545384828789.0 - 176491177632.0 * chi1 * chi2 + 202603761360.0 * chi22 + 77616.0 * chi12 * (2610335.0 + 995766.0 * Seta) - 77287373856.0 * chi22 * Seta + 5841690624.0 * (chi1 + chi2) * PI + 21384760320.0 * (PI**2.0) ) ) ) / 6.0085960704e10 ) A7 = coeffs[0] A8 = coeffs[1] A9 = coeffs[2] Amp_Ins = ( A0 # A1 is missed since its zero + A2 * (fM_s ** (2.0 / 3.0)) + A3 * fM_s + A4 * (fM_s ** (4.0 / 3.0)) + A5 * (fM_s ** (5.0 / 3.0)) + A6 * (fM_s**2.0) # Now we add the coefficient terms + A7 * (fM_s ** (7.0 / 3.0)) + A8 * (fM_s ** (8.0 / 3.0)) + A9 * (fM_s**3.0) ) return Amp_Ins def get_IIa_Amp( fM_s: Array, theta: Array, coeffs: Array, f1, f3, f_RD, f_damp ) -> Array: m1, m2, _, _ = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s # Central frequency point f2 = (f1 + f3) / 2 # For this region, we also need to calculate the the values and derivatives # of the Ins and IIb regions v1, d1 = jax.value_and_grad(get_inspiral_Amp)(f1 * M_s, theta, coeffs) v3, d3 = jax.value_and_grad(get_IIb_Amp)(f3 * M_s, theta, coeffs, f_RD, f_damp) # Here we need the delta solutions delta0 = get_delta0(f1 * M_s, f2 * M_s, f3 * M_s, v1, coeffs[3], v3, d1, d3) delta1 = get_delta1(f1 * M_s, f2 * M_s, f3 * M_s, v1, coeffs[3], v3, d1, d3) delta2 = get_delta2(f1 * M_s, f2 * M_s, f3 * M_s, v1, coeffs[3], v3, d1, d3) delta3 = get_delta3(f1 * M_s, f2 * M_s, f3 * M_s, v1, coeffs[3], v3, d1, d3) delta4 = get_delta4(f1 * M_s, f2 * M_s, f3 * M_s, v1, coeffs[3], v3, d1, d3) Amp_IIa = ( delta0 + delta1 * fM_s + delta2 * (fM_s**2.0) + delta3 * (fM_s**3.0) + delta4 * (fM_s**4.0) ) return Amp_IIa def get_IIb_Amp(fM_s: Array, theta: Array, coeffs: Array, f_RD, f_damp) -> Array: m1, m2, _, _ = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s gamma1 = coeffs[4] gamma2 = coeffs[5] gamma3 = coeffs[6] fDM = f_damp * M_s fRD = f_RD * M_s fDMgamma3 = fDM * gamma3 fminfRD = fM_s - fRD Amp_IIb = ( jnp.exp(-(fminfRD) * gamma2 / (fDMgamma3)) * (fDMgamma3 * gamma1) / ((fminfRD) ** 2.0 + (fDMgamma3) ** 2.0) ) 
return Amp_IIb # @jax.jit def Phase(f: Array, theta: Array, coeffs: Array, transition_freqs: Array) -> Array: """ Computes the phase of the PhenomD waveform following 1508.07253. Sets time and phase of coealence to be zero. Returns: -------- phase (array): Phase of the GW as a function of frequency """ # First lets calculate some of the vairables that will be used below # Mass variables m1, m2, _, _ = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s # Next we need to calculate the transition frequencies # f1, f2, _, _, f_RD, f_damp = get_transition_frequencies(theta, coeffs[5], coeffs[6]) f1, f2, _, _, f_RD, f_damp = transition_freqs phi_Ins = get_inspiral_phase(f * M_s, theta, coeffs) # Next lets construct the phase of the late inspiral (region IIa) # beta0 is found by matching the phase between the region I and IIa # C(1) continuity must be preserved. We therefore need to solve for an additional # contribution to beta1 # Note that derivatives seem to be d/d(fM_s), not d/df # Here I've now defined # phi_IIa(f1*M_s) + beta0 + beta1_correction*(f1*M_s) = phi_Ins(f1*M_s) # ==> phi_IIa'(f1*M_s) + beta1_correction = phi_Ins'(f1*M_s) # ==> beta1_correction = phi_Ins'(f1*M_s) - phi_IIa'(f1*M_s) # ==> beta0 = phi_Ins(f1*M_s) - phi_IIa(f1*M_s) - beta1_correction*(f1*M_s) phi_Ins_f1, dphi_Ins_f1 = jax.value_and_grad(get_inspiral_phase)( f1 * M_s, theta, coeffs ) phi_IIa_f1, dphi_IIa_f1 = jax.value_and_grad(get_IIa_raw_phase)( f1 * M_s, theta, coeffs ) beta1_correction = dphi_Ins_f1 - dphi_IIa_f1 beta0 = phi_Ins_f1 - beta1_correction * (f1 * M_s) - phi_IIa_f1 phi_IIa_func = ( lambda fM_s: get_IIa_raw_phase(fM_s, theta, coeffs) + beta1_correction * fM_s ) phi_IIa = phi_IIa_func(f * M_s) + beta0 # And finally, we do the same thing to get the phase of the merger-ringdown (region IIb) # phi_IIb(f2*M_s) + a0 + a1_correction*(f2*M_s) = phi_IIa(f2*M_s) # ==> phi_IIb'(f2*M_s) + a1_correction = phi_IIa'(f2*M_s) # ==> a1_correction = phi_IIa'(f2*M_s) - phi_IIb'(f2*M_s) # ==> a0 = 
phi_IIa(f2*M_s) - phi_IIb(f2*M_s) - beta1_correction*(f2*M_s) phi_IIa_f2, dphi_IIa_f2 = jax.value_and_grad(phi_IIa_func)(f2 * M_s) phi_IIb_f2, dphi_IIb_f2 = jax.value_and_grad(get_IIb_raw_phase)( f2 * M_s, theta, coeffs, f_RD, f_damp ) a1_correction = dphi_IIa_f2 - dphi_IIb_f2 a0 = phi_IIa_f2 + beta0 - a1_correction * (f2 * M_s) - phi_IIb_f2 phi_IIb = ( get_IIb_raw_phase(f * M_s, theta, coeffs, f_RD, f_damp) + a0 + a1_correction * (f * M_s) ) # And now we can combine them by multiplying by a set of heaviside functions phase = ( phi_Ins * jnp.heaviside(f1 - f, 0.5) + jnp.heaviside(f - f1, 0.5) * phi_IIa * jnp.heaviside(f2 - f, 0.5) + phi_IIb * jnp.heaviside(f - f2, 0.5) ) return phase # @jax.jit def Amp( f: Array, theta: Array, coeffs: Array, transition_frequencies: Array, D=1 ) -> Array: """ Computes the amplitude of the PhenomD frequency domain waveform following 1508.07253. Note that this waveform also assumes that object one is the more massive. Returns: -------- Amplitude (array): """ # First lets calculate some of the vairables that will be used below # Mass variables m1, m2, _, _ = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s eta = m1_s * m2_s / (M_s**2.0) # _, _, f3, f4, f_RD, f_damp = get_transition_frequencies(theta, coeffs[5], coeffs[6]) _, _, f3, f4, f_RD, f_damp = transition_frequencies # First we get the inspiral amplitude Amp_Ins = get_inspiral_Amp(f * M_s, theta, coeffs) # Next lets construct the phase of the late inspiral (region IIa) # Note that this part is a little harder since we need to solve a system of equations for deltas Amp_IIa = get_IIa_Amp(f * M_s, theta, coeffs, f3, f4, f_RD, f_damp) # And finally, we construct the amplitude of the merger-ringdown (region IIb) Amp_IIb = get_IIb_Amp(f * M_s, theta, coeffs, f_RD, f_damp) # And now we can combine them by multiplying by a set of heaviside functions fcut_above = lambda f: (fM_CUT / M_s) fcut_below = lambda f: f[jnp.abs(f - (fM_CUT / M_s)).argmin() - 1] fcut_true = 
jax.lax.cond((fM_CUT / M_s) > f[-1], fcut_above, fcut_below, f) Amp = ( Amp_Ins * jnp.heaviside(f3 - f, 0.5) + jnp.heaviside(f - f3, 0.5) * Amp_IIa * jnp.heaviside(f4 - f, 0.5) + jnp.heaviside(f - f4, 0.5) * Amp_IIb * jnp.heaviside(fcut_true - f, 0.0) + 0.0 * jnp.heaviside(f - fcut_true, 1.0) ) # Prefactor Amp0 = get_Amp0(f * M_s, eta) * ( 2.0 * jnp.sqrt(5.0 / (64.0 * PI)) ) # This second factor is from lalsuite # Need to add in an overall scaling of M_s^2 to make the units correct dist_s = (D * m_per_Mpc) / C return Amp0 * Amp * (M_s**2.0) / dist_s # @jax.jit def _gen_IMRPhenomD( f: Array, theta_intrinsic: Array, theta_extrinsic: Array, coeffs: Array, f_ref: float, ): M_s = (theta_intrinsic[0] + theta_intrinsic[1]) * gt # Shift phase so that peak amplitude matches t = 0 transition_freqs = get_transition_frequencies(theta_intrinsic, coeffs[5], coeffs[6]) _, _, _, f4, f_RD, f_damp = transition_freqs t0 = jax.grad(get_IIb_raw_phase)(f4 * M_s, theta_intrinsic, coeffs, f_RD, f_damp) # Lets call the amplitude and phase now Psi = Phase(f, theta_intrinsic, coeffs, transition_freqs) Mf_ref = f_ref * M_s Psi_ref = Phase(f_ref, theta_intrinsic, coeffs, transition_freqs) Psi -= t0 * ((f * M_s) - Mf_ref) + Psi_ref ext_phase_contrib = 2.0 * PI * f * theta_extrinsic[1] - 2 * theta_extrinsic[2] Psi += ext_phase_contrib fcut_above = lambda f: (fM_CUT / M_s) fcut_below = lambda f: f[jnp.abs(f - (fM_CUT / M_s)).argmin() - 1] fcut_true = jax.lax.cond((fM_CUT / M_s) > f[-1], fcut_above, fcut_below, f) # fcut_true = f[jnp.abs(f - (fM_CUT / M_s)).argmin() - 1] Psi = Psi * jnp.heaviside(fcut_true - f, 0.0) + 2.0 * PI * jnp.heaviside( f - fcut_true, 1.0 ) A = Amp(f, theta_intrinsic, coeffs, transition_freqs, D=theta_extrinsic[0]) h0 = A * jnp.exp(1j * -Psi) return h0 def gen_IMRPhenomD(f: Array, params: Array, f_ref: float): """ Generate PhenomD frequency domain waveform following 1508.07253. 
vars array contains both intrinsic and extrinsic variables theta = [Mchirp, eta, chi1, chi2, D, tc, phic] Mchirp: Chirp mass of the system [solar masses] eta: Symmetric mass ratio [between 0.0 and 0.25] chi1: Dimensionless aligned spin of the primary object [between -1 and 1] chi2: Dimensionless aligned spin of the secondary object [between -1 and 1] D: Luminosity distance to source [Mpc] tc: Time of coalesence. This only appears as an overall linear in f contribution to the phase phic: Phase of coalesence f_ref: Reference frequency for the waveform Returns: -------- h0 (array): Strain """ # Lets make this easier by starting in Mchirp and eta space m1, m2 = Mc_eta_to_ms(jnp.array([params[0], params[1]])) theta_intrinsic = jnp.array([m1, m2, params[2], params[3]]) theta_extrinsic = jnp.array([params[4], params[5], params[6]]) coeffs = get_coeffs(theta_intrinsic) h0 = _gen_IMRPhenomD(f, theta_intrinsic, theta_extrinsic, coeffs, f_ref) return h0 def gen_IMRPhenomD_hphc(f: Array, params: Array, f_ref: float): """ Generate PhenomD frequency domain waveform following 1508.07253. vars array contains both intrinsic and extrinsic variables theta = [Mchirp, eta, chi1, chi2, D, tc, phic] Mchirp: Chirp mass of the system [solar masses] eta: Symmetric mass ratio [between 0.0 and 0.25] chi1: Dimensionless aligned spin of the primary object [between -1 and 1] chi2: Dimensionless aligned spin of the secondary object [between -1 and 1] D: Luminosity distance to source [Mpc] tc: Time of coalesence. This only appears as an overall linear in f contribution to the phase phic: Phase of coalesence inclination: Inclination angle of the binary [between 0 and PI] f_ref: Reference frequency for the waveform Returns: -------- hp (array): Strain of the plus polarization hc (array): Strain of the cross polarization """ iota = params[7] h0 = gen_IMRPhenomD(f, params, f_ref) hp = h0 * (1 / 2 * (1 + jnp.cos(iota) ** 2)) hc = -1j * h0 * jnp.cos(iota) return hp, hc
/ripplegw-0.0.4-py3-none-any.whl/ripple/waveforms/IMRPhenomD.py
0.61231
0.320296
IMRPhenomD.py
pypi
from typing import Tuple import jax.numpy as jnp import jax from ..constants import gt from ..typing import Array from .IMRPhenomD_QNMdata import QNMData_a, QNMData_fRD, QNMData_fdamp def EradRational0815_s(eta, s): eta2 = eta * eta eta3 = eta2 * eta eta4 = eta3 * eta return ( ( 0.055974469826360077 * eta + 0.5809510763115132 * eta2 - 0.9606726679372312 * eta3 + 3.352411249771192 * eta4 ) * ( 1.0 + ( -0.0030302335878845507 - 2.0066110851351073 * eta + 7.7050567802399215 * eta2 ) * s ) ) / ( 1.0 + (-0.6714403054720589 - 1.4756929437702908 * eta + 7.304676214885011 * eta2) * s ) def EradRational0815(eta, chi1, chi2): Seta = jnp.sqrt(1.0 - 4.0 * eta) m1 = 0.5 * (1.0 + Seta) m2 = 0.5 * (1.0 - Seta) m1s = m1 * m1 m2s = m2 * m2 s = (m1s * chi1 + m2s * chi2) / (m1s + m2s) return EradRational0815_s(eta, s) def FinalSpin0815_s(eta, S): eta2 = eta * eta eta3 = eta2 * eta S2 = S * S S3 = S2 * S return eta * ( 3.4641016151377544 - 4.399247300629289 * eta + 9.397292189321194 * eta2 - 13.180949901606242 * eta3 + S * ( (1.0 / eta - 0.0850917821418767 - 5.837029316602263 * eta) + (0.1014665242971878 - 2.0967746996832157 * eta) * S + (-1.3546806617824356 + 4.108962025369336 * eta) * S2 + (-0.8676969352555539 + 2.064046835273906 * eta) * S3 ) ) def get_fRD_fdamp(m1, m2, chi1, chi2): m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s eta_s = m1_s * m2_s / (M_s**2.0) S = (chi1 * m1_s**2 + chi2 * m2_s**2) / (M_s**2.0) # eta2 = eta_s * eta_s # eta3 = eta2 * eta_s # S2 = S * S # S3 = S2 * S # a = eta_s * ( # 3.4641016151377544 # - 4.399247300629289 * eta_s # + 9.397292189321194 * eta2 # - 13.180949901606242 * eta3 # + S # * ( # (1.0 / eta_s - 0.0850917821418767 - 5.837029316602263 * eta_s) # + (0.1014665242971878 - 2.0967746996832157 * eta_s) * S # + (-1.3546806617824356 + 4.108962025369336 * eta_s) * S2 # + (-0.8676969352555539 + 2.064046835273906 * eta_s) * S3 # ) # ) a = FinalSpin0815_s(eta_s, S) fRD = jnp.interp(a, QNMData_a, QNMData_fRD) / ( 1.0 - EradRational0815(eta_s, chi1, chi2) ) 
fdamp = jnp.interp(a, QNMData_a, QNMData_fdamp) / ( 1.0 - EradRational0815(eta_s, chi1, chi2) ) return fRD / M_s, fdamp / M_s def get_transition_frequencies( theta: Array, gamma2: float, gamma3: float ) -> Tuple[float, float, float, float, float, float]: m1, m2, chi1, chi2 = theta M = m1 + m2 f_RD, f_damp = get_fRD_fdamp(m1, m2, chi1, chi2) # Phase transition frequencies f1 = 0.018 / (M * gt) f2 = 0.5 * f_RD # Amplitude transition frequencies f3 = 0.014 / (M * gt) f4_gammaneg_gtr_1 = lambda f_RD_, f_damp_, gamma3_, gamma2_: jnp.abs( f_RD_ + (-f_damp_ * gamma3_) / gamma2_ ) f4_gammaneg_less_1 = lambda f_RD_, f_damp_, gamma3_, gamma2_: jnp.abs( f_RD_ + (f_damp_ * (-1 + jnp.sqrt(1 - (gamma2_) ** 2.0)) * gamma3_) / gamma2_ ) f4 = jax.lax.cond( gamma2 >= 1, f4_gammaneg_gtr_1, f4_gammaneg_less_1, f_RD, f_damp, gamma3, gamma2, ) return f1, f2, f3, f4, f_RD, f_damp @jax.jit def get_coeffs(theta: Array) -> Array: # Retrives the coefficients needed to produce the waveform m1, m2, chi1, chi2 = theta m1_s = m1 * gt m2_s = m2 * gt M_s = m1_s + m2_s eta = m1_s * m2_s / (M_s**2.0) # Definition of chiPN from lalsuite chi_s = (chi1 + chi2) / 2.0 chi_a = (chi1 - chi2) / 2.0 seta = (1 - 4 * eta) ** (1 / 2) chiPN = chi_s * (1 - 76 * eta / 113) + seta * chi_a coeff = ( PhenomD_coeff_table[:, 0] + PhenomD_coeff_table[:, 1] * eta + (chiPN - 1.0) * ( PhenomD_coeff_table[:, 2] + PhenomD_coeff_table[:, 3] * eta + PhenomD_coeff_table[:, 4] * (eta**2.0) ) + (chiPN - 1.0) ** 2.0 * ( PhenomD_coeff_table[:, 5] + PhenomD_coeff_table[:, 6] * eta + PhenomD_coeff_table[:, 7] * (eta**2.0) ) + (chiPN - 1.0) ** 3.0 * ( PhenomD_coeff_table[:, 8] + PhenomD_coeff_table[:, 9] * eta + PhenomD_coeff_table[:, 10] * (eta**2.0) ) ) # FIXME: Change to dictionary lookup return coeff def get_delta0(f1, f2, f3, v1, v2, v3, d1, d3): return ( -(d3 * f1**2 * (f1 - f2) ** 2 * f2 * (f1 - f3) * (f2 - f3) * f3) + d1 * f1 * (f1 - f2) * f2 * (f1 - f3) * (f2 - f3) ** 2 * f3**2 + f3**2 * ( f2 * (f2 - f3) ** 2 * (-4 * f1**2 + 
3 * f1 * f2 + 2 * f1 * f3 - f2 * f3) * v1 + f1**2 * (f1 - f3) ** 3 * v2 ) + f1**2 * (f1 - f2) ** 2 * f2 * (f1 * f2 - 2 * f1 * f3 - 3 * f2 * f3 + 4 * f3**2) * v3 ) / ((f1 - f2) ** 2 * (f1 - f3) ** 3 * (f2 - f3) ** 2) def get_delta1(f1, f2, f3, v1, v2, v3, d1, d3): return ( d3 * f1 * (f1 - f3) * (f2 - f3) * (2 * f2 * f3 + f1 * (f2 + f3)) - ( f3 * ( d1 * (f1 - f2) * (f1 - f3) * (f2 - f3) ** 2 * (2 * f1 * f2 + (f1 + f2) * f3) + 2 * f1 * ( f3**4 * (v1 - v2) + 3 * f2**4 * (v1 - v3) + f1**4 * (v2 - v3) + 4 * f2**3 * f3 * (-v1 + v3) + 2 * f1**3 * f3 * (-v2 + v3) + f1 * ( 2 * f3**3 * (-v1 + v2) + 6 * f2**2 * f3 * (v1 - v3) + 4 * f2**3 * (-v1 + v3) ) ) ) ) / (f1 - f2) ** 2 ) / ((f1 - f3) ** 3 * (f2 - f3) ** 2) def get_delta2(f1, f2, f3, v1, v2, v3, d1, d3): return ( d1 * (f1 - f2) * (f1 - f3) * (f2 - f3) ** 2 * (f1 * f2 + 2 * (f1 + f2) * f3 + f3**2) - d3 * (f1 - f2) ** 2 * (f1 - f3) * (f2 - f3) * (f1**2 + f2 * f3 + 2 * f1 * (f2 + f3)) - 4 * f1**2 * f2**3 * v1 + 3 * f1 * f2**4 * v1 - 4 * f1 * f2**3 * f3 * v1 + 3 * f2**4 * f3 * v1 + 12 * f1**2 * f2 * f3**2 * v1 - 4 * f2**3 * f3**2 * v1 - 8 * f1**2 * f3**3 * v1 + f1 * f3**4 * v1 + f3**5 * v1 + f1**5 * v2 + f1**4 * f3 * v2 - 8 * f1**3 * f3**2 * v2 + 8 * f1**2 * f3**3 * v2 - f1 * f3**4 * v2 - f3**5 * v2 - (f1 - f2) ** 2 * ( f1**3 + f2 * (3 * f2 - 4 * f3) * f3 + f1**2 * (2 * f2 + f3) + f1 * (3 * f2 - 4 * f3) * (f2 + 2 * f3) ) * v3 ) / ((f1 - f2) ** 2 * (f1 - f3) ** 3 * (f2 - f3) ** 2) def get_delta3(f1, f2, f3, v1, v2, v3, d1, d3): return ( (d3 * (f1 - f3) * (2 * f1 + f2 + f3)) / (f2 - f3) - (d1 * (f1 - f3) * (f1 + f2 + 2 * f3)) / (f1 - f2) + ( 2 * ( f3**4 * (-v1 + v2) + 2 * f1**2 * (f2 - f3) ** 2 * (v1 - v3) + 2 * f2**2 * f3**2 * (v1 - v3) + 2 * f1**3 * f3 * (v2 - v3) + f2**4 * (-v1 + v3) + f1**4 * (-v2 + v3) + 2 * f1 * f3 * (f3**2 * (v1 - v2) + f2**2 * (v1 - v3) + 2 * f2 * f3 * (-v1 + v3)) ) ) / ((f1 - f2) ** 2 * (f2 - f3) ** 2) ) / (f1 - f3) ** 3 def get_delta4(f1, f2, f3, v1, v2, v3, d1, d3): return ( -(d3 * (f1 - f2) ** 2 * 
(f1 - f3) * (f2 - f3)) + d1 * (f1 - f2) * (f1 - f3) * (f2 - f3) ** 2 - 3 * f1 * f2**2 * v1 + 2 * f2**3 * v1 + 6 * f1 * f2 * f3 * v1 - 3 * f2**2 * f3 * v1 - 3 * f1 * f3**2 * v1 + f3**3 * v1 + f1**3 * v2 - 3 * f1**2 * f3 * v2 + 3 * f1 * f3**2 * v2 - f3**3 * v2 - (f1 - f2) ** 2 * (f1 + 2 * f2 - 3 * f3) * v3 ) / ((f1 - f2) ** 2 * (f1 - f3) ** 3 * (f2 - f3) ** 2) PhenomD_coeff_table = jnp.array( [ [ # rho1 (element 0) 3931.8979897196696, -17395.758706812805, 3132.375545898835, 343965.86092361377, -1.2162565819981997e6, -70698.00600428853, 1.383907177859705e6, -3.9662761890979446e6, -60017.52423652596, 803515.1181825735, -2.091710365941658e6, ], [ # rho2 (element 1) -40105.47653771657, 112253.0169706701, 23561.696065836168, -3.476180699403351e6, 1.137593670849482e7, 754313.1127166454, -1.308476044625268e7, 3.6444584853928134e7, 596226.612472288, -7.4277901143564405e6, 1.8928977514040343e7, ], [ # rho3 (element 2) 83208.35471266537, -191237.7264145924, -210916.2454782992, 8.71797508352568e6, -2.6914942420669552e7, -1.9889806527362722e6, 3.0888029960154563e7, -8.390870279256162e7, -1.4535031953446497e6, 1.7063528990822166e7, -4.2748659731120914e7, ], [ # v2 (element 3) 0.8149838730507785, 2.5747553517454658, 1.1610198035496786, -2.3627771785551537, 6.771038707057573, 0.7570782938606834, -2.7256896890432474, 7.1140380397149965, 0.1766934149293479, -0.7978690983168183, 2.1162391502005153, ], [ # gamma1 (element 4) 0.006927402739328343, 0.03020474290328911, 0.006308024337706171, -0.12074130661131138, 0.26271598905781324, 0.0034151773647198794, -0.10779338611188374, 0.27098966966891747, 0.0007374185938559283, -0.02749621038376281, 0.0733150789135702, ], [ # gamma2 (element 5) 1.010344404799477, 0.0008993122007234548, 0.283949116804459, -4.049752962958005, 13.207828172665366, 0.10396278486805426, -7.025059158961947, 24.784892370130475, 0.03093202475605892, -2.6924023896851663, 9.609374464684983, ], [ # gamma3 (element 6) 1.3081615607036106, -0.005537729694807678, 
-0.06782917938621007, -0.6689834970767117, 3.403147966134083, -0.05296577374411866, -0.9923793203111362, 4.820681208409587, -0.006134139870393713, -0.38429253308696365, 1.7561754421985984, ], [ # sig1 (element 7) 2096.551999295543, 1463.7493168261553, 1312.5493286098522, 18307.330017082117, -43534.1440746107, -833.2889543511114, 32047.31997183187, -108609.45037520859, 452.25136398112204, 8353.439546391714, -44531.3250037322, ], [ # sig2 (element 8) -10114.056472621156, -44631.01109458185, -6541.308761668722, -266959.23419307504, 686328.3229317984, 3405.6372187679685, -437507.7208209015, 1.6318171307344697e6, -7462.648563007646, -114585.25177153319, 674402.4689098676, ], [ # sig3 (element 9) 22933.658273436497, 230960.00814979506, 14961.083974183695, 1.1940181342318142e6, -3.1042239693052764e6, -3038.166617199259, 1.8720322849093592e6, -7.309145012085539e6, 42738.22871475411, 467502.018616601, -3.064853498512499e6, ], [ # sig4 (element 10) -14621.71522218357, -377812.8579387104, -9608.682631509726, -1.7108925257214056e6, 4.332924601416521e6, -22366.683262266528, -2.5019716386377467e6, 1.0274495902259542e7, -85360.30079034246, -570025.3441737515, 4.396844346849777e6, ], [ # beta1 (element 11) 97.89747327985583, -42.659730877489224, 153.48421037904913, -1417.0620760768954, 2752.8614143665027, 138.7406469558649, -1433.6585075135881, 2857.7418952430758, 41.025109467376126, -423.680737974639, 850.3594335657173, ], [ # beta2 (element 12) -3.282701958759534, -9.051384468245866, -12.415449742258042, 55.4716447709787, -106.05109938966335, -11.953044553690658, 76.80704618365418, -155.33172948098394, -3.4129261592393263, 25.572377569952536, -54.408036707740465, ], [ # beta3 (element 13) -0.000025156429818799565, 0.000019750256942201327, -0.000018370671469295915, 0.000021886317041311973, 0.00008250240316860033, 7.157371250566708e-6, -0.000055780000112270685, 0.00019142082884072178, 5.447166261464217e-6, -0.00003220610095021982, 0.00007974016714984341, ], [ # a1 (element 14) 
43.31514709695348, 638.6332679188081, -32.85768747216059, 2415.8938269370315, -5766.875169379177, -61.85459307173841, 2953.967762459948, -8986.29057591497, -21.571435779762044, 981.2158224673428, -3239.5664895930286, ], [ # a2 (element 15) -0.07020209449091723, -0.16269798450687084, -0.1872514685185499, 1.138313650449945, -2.8334196304430046, -0.17137955686840617, 1.7197549338119527, -4.539717148261272, -0.049983437357548705, 0.6062072055948309, -1.682769616644546, ], [ # a3 (element 16) 9.5988072383479, -397.05438595557433, 16.202126189517813, -1574.8286986717037, 3600.3410843831093, 27.092429659075467, -1786.482357315139, 5152.919378666511, 11.175710130033895, -577.7999423177481, 1808.730762932043, ], [ # a4 (element 17) -0.02989487384493607, 1.4022106448583738, -0.07356049468633846, 0.8337006542278661, 0.2240008282397391, -0.055202870001177226, 0.5667186343606578, 0.7186931973380503, -0.015507437354325743, 0.15750322779277187, 0.21076815715176228, ], [ # a5 (element 18) 0.9974408278363099, -0.007884449714907203, -0.059046901195591035, 1.3958712396764088, -4.516631601676276, -0.05585343136869692, 1.7516580039343603, -5.990208965347804, -0.017945336522161195, 0.5965097794825992, -2.0608879367971804, ], ] )
/ripplegw-0.0.4-py3-none-any.whl/ripple/waveforms/IMRPhenomD_utils.py
0.7478
0.332907
IMRPhenomD_utils.py
pypi
[![DOI](http://joss.theoj.org/papers/10.21105/joss.00925/status.svg)](https://doi.org/10.21105/joss.00925) [![PyPI version](https://badge.fury.io/py/ripser.svg)](https://badge.fury.io/py/ripser) [![Downloads](https://pypip.in/download/ripser/badge.svg)](https://pypi.python.org/pypi/ripser/) [![Conda Version](https://img.shields.io/conda/vn/conda-forge/ripser.svg)](https://anaconda.org/conda-forge/ripser) [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/ripser.svg)](https://anaconda.org/conda-forge/ripser) [![Build Status](https://travis-ci.org/scikit-tda/ripser.py.svg?branch=master)](https://travis-ci.org/scikit-tda/ripser.py) [![Build status](https://ci.appveyor.com/api/projects/status/020nrvrq2rdg2iu1?svg=true)](https://ci.appveyor.com/project/sauln/ripser-py) [![codecov](https://codecov.io/gh/scikit-tda/ripser.py/branch/master/graph/badge.svg)](https://codecov.io/gh/scikit-tda/ripser.py) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) # Ripser.py Ripser.py is a lean persistent homology package for Python. Building on the blazing fast C++ Ripser package as the core computational engine, Ripser.py provides an intuitive interface for - computing persistence cohomology of sparse and dense data sets, - visualizing persistence diagrams, - computing lowerstar filtrations on images, and - computing representative cochains. Additionally, through extensive testing and continuous integration, Ripser.py is easy to install on Mac, Linux, and Windows platforms. To aid your use of the package, we've put together a large set of notebooks that demonstrate many of the features available. Complete documentation about the package can be found at [ripser.scikit-tda.org](https://ripser.scikit-tda.org). ## Related Projects If you're looking for the original C++ library, you can find it at [Ripser/ripser](https://github.com/ripser/ripser). 
If you're looking for a GPU-accelerated version of Ripser, you can find it at [Ripser++](https://github.com/simonzhang00/ripser-plusplus) ## Setup Ripser.py is available on all major platforms. All that is required is that you install the standard Python numerical computing libraries and Cython. Dependencies: - Cython - numpy - scipy - scikit-learn - persim **Windows users:** If you are using a Windows machine, you will also need to install [MinGW](http://www.mingw.org) on your system. **Mac users:** Updating your Xcode and Xcode command line tools will probably fix any issues you have with installation. Cython should be the only library required before installation. To install, type the following commands into your environment: ``` pip install cython pip install ripser ``` If you are having trouble installing, please let us know! ## Optional dependency Ripser.py when compiled from source can have a *steroid*<sup>1</sup> shot by replacing the standard `unordered_map` from the STL by one of the fastest implementation available: [robin_hood](https://github.com/martinus/robin-hood-hashing). Benchmarking of Ripser.py using the `robin_hood` implementation showed speed-ups up to **30%**. To be able to use `robin_hood` instead of STL, you only need to clone the repository containing the implementation: ``` # Run this command at the root of the project git clone https://github.com/martinus/robin-hood-hashing ripser/robinhood ``` <sup>1</sup> The Python package is already compiled with `robin_hood` by default. 
## Usage The interface is as simple as can be: ``` import numpy as np from ripser import ripser from persim import plot_diagrams data = np.random.random((100,2)) diagrams = ripser(data)['dgms'] plot_diagrams(diagrams, show=True) ``` We also supply a Scikit-learn transformer style object if you would prefer to use that: ``` import numpy as np from ripser import Rips rips = Rips() data = np.random.random((100,2)) diagrams = rips.fit_transform(data) rips.plot(diagrams) ``` <img src="https://i.imgur.com/WmQPYnn.png" alt="Ripser.py output persistence diagram" width="70%"/> # Contributions We welcome all kinds of contributions! Please get in touch if you would like to help out. Everything from code to notebooks to examples and documentation are all equally valuable so please don't feel you can't contribute. To contribute please fork the project make your changes and submit a pull request. We will do our best to work through any issues with you and get your code merged into the main branch. If you found a bug, have questions, or are just having trouble with the library, please open an issue in our [issue tracker](https://github.com/scikit-tda/ripser.py/issues/new) and we'll try to help resolve the concern. # License Ripser.py is available under an MIT license! The core C++ code is derived from Ripser, which is also available under an MIT license and copyright to Ulrich Bauer. The modifications, Python code, and documentation is copyright to Christopher Tralie and Nathaniel Saul. 
# Citing If you use this package, please cite the JoSS paper found here: [![DOI](http://joss.theoj.org/papers/10.21105/joss.00925/status.svg)](https://doi.org/10.21105/joss.00925) You can use the following bibtex entry ``` @article{ctralie2018ripser, doi = {10.21105/joss.00925}, url = {https://doi.org/10.21105/joss.00925}, year = {2018}, month = {Sep}, publisher = {The Open Journal}, volume = {3}, number = {29}, pages = {925}, author = {Christopher Tralie and Nathaniel Saul and Rann Bar-On}, title = {{Ripser.py}: A Lean Persistent Homology Library for Python}, journal = {The Journal of Open Source Software} } ```
/ripser-0.6.1.tar.gz/ripser-0.6.1/README.md
0.706292
0.932453
README.md
pypi
# Riptable Exercises This workbook is meant to give practical experience with the key ideas & functionality of Riptable. To complete it, you'll need to consult the [Intro to Riptable](tutorial.rst). Depending on your preferred learning style, you can read through the entire intro guide first or start with the exercises and refer to the guide as needed. Note that the intro guide has more coverage and detail, so it's well worth reading in full at some point. If you have any questions or comments, email RiptableDocumentation@sig.com. ``` import riptable as rt import numpy as np ``` ## Introduction to the Riptable Dataset **Datasets** are the core class of riptable. They are tables of data, consisting of a series of **columns** of the same length (sometimes referred to as **fields**). Structurally, they behave like python dictionaries, and can be created directly from one. We'll familiarize ourselves with Datasets by manually constructing one by generating fake sample data using `np.random.default_rng().choice(...)` or similar. In real life they will essentially always be generated from world data. **First, create a python dictionary with two fields of the same length (>1000); one column of stock prices and one of symbols.** **Make sure the symbols have duplicates, for later aggregation exercises.** ``` ``` **Create a riptable dataset from this, using** `rt.Dataset(my_dict)`. ``` ``` You can easily append more columns to a dataset. **Add a new column of integer trade size, using** `my_dset.Size = `. ``` ``` Columns can be referred with brackets around a string name as well. This is typically used when the column name comes from a variable. **Add a new column of booleans indicating whether you traded this trade, using** `my_dset['MyTrade'] =`. ``` ``` **Add a new column of string "Buy" or "Sell" indicating the customer direction.** ``` ``` Riptable will convert these lists to the riptable **FastArray** container and cast the data to an appropriate numpy datatype. 
**View the datatypes with** `my_dset.dtypes`. ``` ``` **View some sample rows of the dataset using** `.sample()`. You should use this instead of `.head()` because the initial rows of a dataset are often unrepresentative. ``` ``` **View distributional stats of the numerical fields of your dataset with** `.describe()`. You can call this on a single column as well. ``` ``` ## Manipulating data You can perform simple operations on riptable columns with normal python syntax. Riptable will do them to the whole column at once, efficiently. **Create a new column by performing scalar arithmetic on one of your numeric columns.** ``` ``` As long as the columns are the same size (as is guaranteed if they're in the same dataset) you can perform combining operations the same way. **Create a new column of total price paid for the trade by multiplying two existing columns together.** Riptable will automatically upcast types as necessary to preserve information. ``` ``` There are many built-in functions as well, which you call with either `my_dset.field.function()` or `rt.function(my_dset.field)` syntax. **Find the unique Symbols in your dataset.** ``` ``` ## Date/Time Riptable has three main date/time types: `Date`, `DateTimeNano`, and `TimeSpan`. **Give each row of your dataset an** `rt.Date`. **Make sure they're not all different, but still include days from multiple months.** Note that due to Riptable idiosyncrasies you need to generate a list of yyyymmdd strings and pass into the `rt.Date(...)` constructor, not construct Dates individually. ``` ``` **Give each row a unique(ish)** `TimeSpan` **as a trade time.** You can instantiate them using `rt.TimeSpan(hours_var, unit='h')`. ``` ``` **Create a DateTimeNano of the combined TradeTime + Date by simple addition. Riptable knows how to sum the types.** Be careful here, by default you'll get a GMT timezone, you can force NYC with `rt.DateTimeNano(..., from_tz='NYC')`. 
``` ``` To reverse this operation and get out separate dates and times from a DateTimeNano, you can call `rt.Date(my_DateTimeNano)` and `my_DateTimeNano.time_since_midnight()`. **Create a new month name column by using the** `.strftime` **function.** ``` ``` **Create another new month column by using the** `.start_of_month` **attribute.** This is nice for grouping because it will automatically sort correctly. ``` ``` ## Sorting Riptable has two sorts, `sort_copy` (which preserves the original dataset) and `sort_inplace`, which is faster and more memory-efficient if you don't need the original data order. **Sort your dataset by TradeDateTime.** This is the natural ordering of a list of trades, so do it in-place. ``` ``` ## Filtering Filtering is the principal way to work with a subset of your data in riptable. It is commonly used for looking at a restricted set of trades matching some criterion you care about. Except in rare instances, though, you should maintain your dataset in its full size, and only apply a filter when performing a final computation. This will avoid unnecessary data duplication and improve speed & memory usage. **Construct a filter of only your sales. (A filter is a column of Booleans which is true only for the rows you're interested in.)** You can combine filters using & or |. Be careful to always wrap expressions in parentheses to avoid an extremely slow call into native python followed by a crash. Always `(my_dset.field1 > 10) & (my_dset.field2 < 5)`, never `my_dset.field1 > 10 & my_dset.field2 > 5`. ``` ``` **Compute the total Trade Size, filtered for only your sales.** For this and many other instances, you can & should pass your filter into the `filter` kwarg of the `.nansum(...)` call. This allows riptable to perform the filtering during the nansum computation, rather than instantiating a new column and then summing it. 
``` ``` **Count how many times you sold each symbol.** Here the `.count()` function doesn't accept a `filter` kwarg, so you must fall back to explicitly filtering the `Symbol` field before counting. Be careful that you only filter down the `Symbol` field, not the entire dataset, otherwise you are wasting a lot of compute. ``` ``` ## Categoricals So far, we've been operating on your symbol column as a column of strings. However, it's far more efficient when you have a large column with many repeats to use a categorical, which assigns each unique value a number, and stores the labels & numbers separately. This is memory-efficient, and also computationally efficient, as riptable can perform operations on the unique values, then expand out to the full vector appropriately. **Make a new column of your string column converted to a categorical, using** `rt.Cat(column)`. ``` ``` **Perform the same filtered count from above, on the categorical.** The categorical `.count()` admits a `filter` kwarg, which makes it simpler. ``` ``` Categoricals can be used as groupings. When you call a numeric function on a categorical and pass numeric columns in, riptable knows to do the calculation per-group. **Compute the total amount of contracts sold by customers in each symbol.** ``` ``` The `transform=True` kwarg in a categorical operation performs the aggregation, then *transforms* it back up to the original shape of the categorical, giving each row the appropriate value from its group. **Make a new column which is the average trade price, per symbol.** ``` ``` **Inspect with** `.sample()` **to confirm that this value is consistent for rows with matching symbol.** ``` ``` If you need to perform a custom operation on each categorical, you can pass in a function with `.apply_reduce` (which aggregates) or `.apply_nonreduce` (which is like `transform=True`). 
Note that the custom function you pass needs to expect a FastArray, and output a scalar (`apply_reduce`) or same-length FastArray (`apply_nonreduce`). **Find, for each symbol, the trade size of the second trade occurring in the dataset.** ``` ``` Sometimes you want to aggregate based on multiple values. In these cases we use multi-key categoricals. **Use a multi-key categorical to compute the average size per symbol-month pair.** ``` ``` ## Accumulating Aggregating over two values for human viewing is often most conveniently done with an accum. **Use** `Accum2` **to compute the average size per symbol-month pair.** ``` ``` Average numbers can be meaningless. It is often better to consider relative percentage instead. **Use** `accum_ratiop` **to compute the fraction of total volume done by each symbol-month pair.** ``` ``` ## Merging There are two main types of merges. First is `merge_lookup`. This is used for enriching one (typically large) dataset with information from another (typically small) dataset. **Create a new dataset with one row per symbol from your dataset, and a second column of who trades each symbol.** ``` ``` **Enrich the main dataset by putting the correct trader into each row.** ``` ``` The second type of merge is `merge_asof`, which is used for fuzzy alignment between two datasets, typically by time (though often by other variables). **Create a new index price dataset with one price per minute, which covers all the Dates in your dataset.** The index price doesn't need to be reasonable. Each row should have a DateTimeNano as the datetime. ``` ``` **Use** `merge_asof` **to get the most recent Index Price associated with each trade in your main dataset.** Note both datasets need to be sorted for merge_asof. The `on` kwarg is the numeric/time field that looks for close matches. The `by` kwarg is not necessary here, but could constrain the match to a subset if, for example, you had multiple indices and a column of which one each row is associated with. 
**Use** `direction='backward'` **to ensure you're not biasing your data by looking into the future!** ``` ``` ## Saving/Loading The native riptable filetype is .sds. It's the fastest way to save & load your data. **Save out your dataset to file using** `rt.save_sds`. ``` ``` **Delete your dataset to free up memory using the native python** `del my_dset`. Note that if there are references to the dataset in other objects you may not actually free up memory. ``` ``` **Reload your saved dataset from disk with** `rt.load_sds`. ``` ``` To load from h5 files (a common file type at SIG), use `rt.load_h5(file)`. To load from csv files, use the slow but robust pandas loader, with `rt.Dataset.from_pandas(pd.read_csv(file))`.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/RiptableExercises.ipynb
0.861916
0.99554
RiptableExercises.ipynb
pypi
Work with Dates and Times ========================= In Riptable, there are three fundamental date and time classes: - ``rt.Date``, used for date information with no time attached to it. - ``rt.DateTimeNano``, used for data with both date and time information (including time zone), to nanosecond precision. - ``rt.TimeSpan``, used for “time since midnight data,” with no date information attached. Here, we’ll cover how to create date and time objects, how to extract data from these objects, how to use date and time arithmetic to build useful date and time representations, and how to reformat date and time information for display. ``Date`` Objects ---------------- A Date object stores an array of dates with no time data attached. You can create Date arrays from strings, integer date values, or Matlab ordinal dates. For Matlab details, see `Matlab Dates and Times <https://www.mathworks.com/help/matlab/date-and-time-operations.html>`__. Creating Date arrays from strings is fairly common. If your string dates are in YYYYMMDD format, you can simply pass the list of strings to ``rt.Date()``:: >>> rt.Date(['20210101', '20210519', '20220308']) Date(['2021-01-01', '2021-05-19', '2022-03-08']) If your string dates are in another format, you can tell ``rt.Date()`` what to expect using Python ``strptime`` format code:: >>> rt.Date(['12/31/19', '6/30/19', '02/21/19'], format='%m/%d/%y') Date(['2019-12-31', '2019-06-30', '2019-02-21']) For a list of format codes and ``strptime`` implementation details, see `Python’s 'strftime' cheatsheet <https://strftime.org/>`__. The formatting codes are the same for ``strftime`` and ``strptime``. Note: Under the hood, dates are stored as integers – specifically, as the number of days since the Unix epoch, 01-01-1970:: >>> date_arr = rt.Date(['19700102', '19700103', '19700212']) >>> date_arr._fa FastArray([ 1, 2, 42]) Dates have various properties (a.k.a. attributes) that give you information about a Date. 
Let’s create a Dataset with a column of Dates, then use Date properties to extract information into new columns:: >>> ds = rt.Dataset() >>> # Generate a range of dates, spaced 15 days apart >>> ds.Dates = rt.Date.range('2019-01-01', '2019-02-30', step=15) >>> # Some useful Date properties >>> ds.Year = ds.Dates.year >>> ds.Month = ds.Dates.month # 1=Jan, 12=Dec >>> ds.Day_of_Month = ds.Dates.day_of_month >>> ds.Day_of_Week = ds.Dates.day_of_week # 0=Mon, 6=Sun >>> ds.Day_of_Year = ds.Dates.day_of_year >>> ds # Dates Year Month Day_of_Month Day_of_Week Day_of_Year - ---------- ----- ----- ------------ ----------- ----------- 0 2019-01-01 2,019 1 1 1 1 1 2019-01-16 2,019 1 16 2 16 2 2019-01-31 2,019 1 31 3 31 3 2019-02-15 2,019 2 15 4 46 The following two properties are particularly useful when you want to group data by month or week. We’ll see some examples when we talk about Categoricals and Accums:: >>> ds.Start_of_Month = ds.Dates.start_of_month >>> ds.Start_of_Week = ds.Dates.start_of_week # Returns the date of the previous Monday >>> ds # Dates Year Month Day_of_Month Day_of_Week Day_of_Year Start_of_Month Start_of_Week - ---------- ----- ----- ------------ ----------- ----------- -------------- ------------- 0 2019-01-01 2,019 1 1 1 1 2019-01-01 2018-12-31 1 2019-01-16 2,019 1 16 2 16 2019-01-01 2019-01-14 2 2019-01-31 2,019 1 31 3 31 2019-01-01 2019-01-28 3 2019-02-15 2,019 2 15 4 46 2019-02-01 2019-02-11 We used Python’s ``strptime`` format code above to tell ``rt.Date()`` how to parse our data. Riptable date and time objects can also use the ``strftime()`` method to format data for display:: >>> ds.MonthYear = ds.Dates.strftime('%b%y') >>> ds.col_filter(['Dates', 'MonthYear']) # Dates MonthYear - ---------- --------- 0 2019-01-01 Jan19 1 2019-01-16 Jan19 2 2019-01-31 Jan19 3 2019-02-15 Feb19 You can do some arithmetic with date and time objects. 
For example, we can get the number of days between two dates by subtracting one date from another:: >>> date_span = ds.Dates.max() - ds.Dates.min() >>> date_span DateSpan(['45 days']) This returns a DateSpan object, which is a way to represent the delta, or duration, between two dates. You can convert it to an integer if you prefer:: >>> date_span.astype(int) FastArray([45]) If you add a DateSpan to a Date, you get a Date:: >>> ds.Dates.min() + date_span Date(['2019-02-15']) Subtracting an array of dates from an array of dates gives you an array of DateSpans. The two Date arrays must be the same length:: >>> ds.DateDiff = ds.Dates - ds.Start_of_Month >>> ds.col_filter(['Dates', 'Start_of_Month', 'DateDiff']) # Dates Start_of_Month DateDiff - ---------- -------------- -------- 0 2019-01-01 2019-01-01 0 days 1 2019-01-16 2019-01-01 15 days 2 2019-01-31 2019-01-01 30 days 3 2019-02-15 2019-02-01 14 days Or you can subtract one Date from every record in a Date array:: >>> ds.Dates2 = ds.Dates - rt.Date('20190102') >>> ds.col_filter(['Dates', 'Dates2']) # Dates Dates2 - ---------- ------- 0 2019-01-01 -1 days 1 2019-01-16 14 days 2 2019-01-31 29 days 3 2019-02-15 44 days ``DateTimeNano`` Objects ------------------------ A ``DateTimeNano`` object stores data that has both date and time information, with the time specified to nanosecond precision. Like ``Date`` objects, ``DateTimeNano`` objects can be created from strings. Strings are common when the data is from, say, a CSV file. Unlike ``Date`` objects, ``DateTimeNano``\ s are time-zone-aware. When you create a ``DateTimeNano``, you need to specify the time zone of origin with the ``from_tz`` argument. Since Riptable is mainly used for financial market data, its time zone options are limited to NYC, DUBLIN, and (as of Riptable 1.3.6) Australia/Sydney, plus GMT and UTC (which is an alias for GMT). 
(If you’re wondering why ‘Australia/Sydney’ isn’t abbreviated, it’s because Riptable uses the standard time zone name from the `tz database <https://en.wikipedia.org/wiki/Tz_database>`__. In the future, Riptable will support only the `standard names <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>`__ in the tz database.) :: >>> rt.DateTimeNano(['20210101 09:31:15', '20210519 05:21:17'], from_tz='GMT') DateTimeNano(['20210101 04:31:15.000000000', '20210519 01:21:17.000000000'], to_tz='NYC') Notice that the ``DateTimeNano`` is returned with ``to_tz='NYC'``. This is the time zone the data is displayed in; NYC is the default. You can change the display time zone when you create the ``DateTimeNano`` by using ``to_tz``:: >>> time_arr = rt.DateTimeNano(['20210101 09:31:15', '20210519 05:21:17'], ... from_tz='GMT', to_tz='GMT') >>> time_arr DateTimeNano(['20210101 09:31:15.000000000', '20210519 05:21:17.000000000'], to_tz='GMT') And as with Dates, you can specify the format of your string data:: >>> rt.DateTimeNano(['12/31/19', '6/30/19'], format='%m/%d/%y', from_tz='NYC') DateTimeNano(['20191231 00:00:00.000000000', '20190630 00:00:00.000000000'], to_tz='NYC') When you’re dealing with large amounts of data, it’s more typical to get dates and times that are represented as nanoseconds since the Unix epoch (01-01-1970). In fact, that is how ``DateTimeNano`` objects are stored (it’s much more efficient to store numbers than strings):: >>> time_arr._fa FastArray([1609493475000000000, 1621401677000000000], dtype=int64) If your data comes in this way, ``rt.DateTimeNano()`` can convert it easily. 
Just supply the time zone:: >>> rt.DateTimeNano([1609511475000000000, 1621416077000000000], from_tz='NYC') DateTimeNano(['20210101 14:31:15.000000000', '20210519 09:21:17.000000000'], to_tz='NYC') To split the date off a DateTimeNano, use ``rt.Date()``:: >>> rt.Date(time_arr) Date(['2021-01-01', '2021-05-19']) To get the time, use ``time_since_midnight()``:: >>> time_arr.time_since_midnight() TimeSpan(['09:31:15.000000000', '05:21:17.000000000']) Note that the result is a TimeSpan. We’ll look at these more in the next section. You can also get the time in nanoseconds since midnight:: >>> time_arr.nanos_since_midnight() FastArray([34275000000000, 19277000000000], dtype=int64) ``DateTimeNano``\ s can be reformatted for display using ``strftime()``:: >>> time_arr.strftime('%m/%d/%y %H:%M:%S') # Date and time array(['01/01/21 09:31:15', '05/19/21 05:21:17'], dtype=object) Just the time:: >>> time_arr.strftime('%H:%M:%S') array(['09:31:15', '05:21:17'], dtype=object) Some arithmetic:: >>> # Create two DateTimeNano arrays >>> time_arr1 = rt.DateTimeNano(['20220101 12:00:00', '20220301 13:00:00'], from_tz='NYC', to_tz='NYC') >>> time_arr2 = rt.DateTimeNano(['20190101 11:00:00', '20190301 11:30:00'], from_tz='NYC', to_tz='NYC') ``DateTimeNano`` - ``DateTimeNano`` = ``TimeSpan`` :: >>> timespan1 = time_arr1 - time_arr2 >>> timespan1 TimeSpan(['1096d 01:00:00.000000000', '1096d 01:30:00.000000000']) ``DateTimeNano`` + ``TimeSpan`` = ``DateTimeNano`` :: >>> dtn1 = time_arr1 + timespan1 >>> dtn1 DateTimeNano(['20250101 13:00:00.000000000', '20250301 14:30:00.000000000'], to_tz='NYC') ``DateTimeNano`` - ``TimeSpan`` = ``DateTimeNano`` :: >>> dtn2 = dtn1 - timespan1 >>> dtn2 DateTimeNano(['20220101 12:00:00.000000000', '20220301 13:00:00.000000000'], to_tz='NYC') ``TimeSpan`` Objects -------------------- You saw above how a ``TimeSpan`` represents a duration of time between two ``DateTimeNano``\ s. You can also think of it as a representation of a time of day. 
Recall that you can split a ``TimeSpan`` off a ``DateTimeNano`` using ``time_since_midnight()``. Just keep in mind that a ``TimeSpan`` by itself has no absolute reference to Midnight of any day in particular. As an example, let’s say you want to find out which trades were made before a certain time of day (on any day). If your data has ``DateTimeNano``\ s, you can split off the ``TimeSpan``, then filter for the times you’re interested in:: >>> rng = np.random.default_rng(seed=42) >>> ds = rt.Dataset() >>> N = 100 # Length of the Dataset >>> ds.Symbol = rt.FA(rng.choice(['AAPL', 'AMZN', 'TSLA', 'SPY', 'GME'], N)) >>> ds.Size = rng.random(N) * 100 >>> # Create a column of randomly generated DateTimeNanos >>> ds.TradeDateTime = rt.DateTimeNano.random(N) >>> ds.TradeTime = ds.TradeDateTime.time_since_midnight() >>> ds # Symbol Size TradeDateTime TradeTime --- ------ ----- --------------------------- ------------------ 0 AAPL 19.99 20190614 13:07:21.352420597 13:07:21.352420597 1 SPY 0.74 19970809 19:34:40.178693393 19:34:40.178693393 2 SPY 78.69 19861130 20:06:31.775222495 20:06:31.775222495 3 TSLA 66.49 20081111 04:15:24.079385833 04:15:24.079385833 4 TSLA 70.52 20190419 06:21:31.197889103 06:21:31.197889103 5 GME 78.07 19861112 05:20:14.239289462 05:20:14.239289462 6 AAPL 45.89 20110329 20:55:07.198530171 20:55:07.198530171 7 SPY 56.87 19780303 03:19:32.676920289 03:19:32.676920289 8 AMZN 13.98 19930305 22:34:02.767331408 22:34:02.767331408 9 AAPL 11.45 19840723 04:08:10.118105881 04:08:10.118105881 10 TSLA 66.84 19940814 03:08:03.730164619 03:08:03.730164619 11 GME 47.11 19730612 22:33:46.871406555 22:33:46.871406555 12 SPY 56.52 19840118 14:01:10.111423986 14:01:10.111423986 13 SPY 76.50 19740813 15:26:44.457459450 15:26:44.457459450 14 SPY 63.47 20050106 18:13:57.982489010 18:13:57.982489010 ... ... ... ... ... 
85 SPY 2.28 19930706 00:24:05.337093375 00:24:05.337093375 86 AAPL 95.86 20140823 11:35:14.816318096 11:35:14.816318096 87 AMZN 48.23 20070929 22:49:10.456157805 22:49:10.456157805 88 SPY 78.27 19930616 20:30:27.490477141 20:30:27.490477141 89 GME 8.27 19860626 07:48:16.756213658 07:48:16.756213658 90 TSLA 48.67 20060824 19:29:19.583638324 19:29:19.583638324 91 GME 49.07 19751026 20:29:32.616225869 20:29:32.616225869 92 GME 93.78 19911222 14:53:30.879285646 14:53:30.879285646 93 AMZN 57.17 19970715 20:26:36.179803660 20:26:36.179803660 94 GME 47.35 19961214 10:26:16.609357094 10:26:16.609357094 95 AMZN 26.70 19830606 14:02:30.699183111 14:02:30.699183111 96 AMZN 33.16 19821114 05:56:13.504071773 05:56:13.504071773 97 SPY 52.07 19740606 03:47:03.798827481 03:47:03.798827481 98 SPY 43.89 19881226 22:19:55.209671459 22:19:55.209671459 99 AAPL 2.16 19840720 11:51:26.734190049 11:51:26.734190049 If we want to find the trades that happened before 10:00 a.m., we need a TimeSpan that represents 10:00 a.m. Then we can can compare our TradeTimes against it. To construct a TimeSpan from scratch, you can pass time strings in ``%H:%M:%S`` format:: >>> rt.TimeSpan(['09:00', '10:45', '02:30', '15:00', '23:10']) TimeSpan(['09:00:00.000000000', '10:45:00.000000000', '02:30:00.000000000', '15:00:00.000000000', '23:10:00.000000000']) Or from an array of numerics, along with a unit, like hours:: >>> rt.TimeSpan([9, 10, 12, 14, 18], unit='h') TimeSpan(['09:00:00.000000000', '10:00:00.000000000', '12:00:00.000000000', '14:00:00.000000000', '18:00:00.000000000']) For our purposes, this will do:: >>> tenAM = rt.TimeSpan(10, unit='h') >>> tenAM TimeSpan(['10:00:00.000000000']) Now we can compare the TradeTime values against it. 
We’ll put the results of the comparison into a column so we can spot check them:: >>> ds.TradesBefore10am = (ds.TradeTime < tenAM) >>> ds # Symbol Size TradeDateTime TradeTime TradesBefore10am --- ------ ----- --------------------------- ------------------ ---------------- 0 AAPL 19.99 20190614 13:07:21.352420597 13:07:21.352420597 False 1 SPY 0.74 19970809 19:34:40.178693393 19:34:40.178693393 False 2 SPY 78.69 19861130 20:06:31.775222495 20:06:31.775222495 False 3 TSLA 66.49 20081111 04:15:24.079385833 04:15:24.079385833 True 4 TSLA 70.52 20190419 06:21:31.197889103 06:21:31.197889103 True 5 GME 78.07 19861112 05:20:14.239289462 05:20:14.239289462 True 6 AAPL 45.89 20110329 20:55:07.198530171 20:55:07.198530171 False 7 SPY 56.87 19780303 03:19:32.676920289 03:19:32.676920289 True 8 AMZN 13.98 19930305 22:34:02.767331408 22:34:02.767331408 False 9 AAPL 11.45 19840723 04:08:10.118105881 04:08:10.118105881 True 10 TSLA 66.84 19940814 03:08:03.730164619 03:08:03.730164619 True 11 GME 47.11 19730612 22:33:46.871406555 22:33:46.871406555 False 12 SPY 56.52 19840118 14:01:10.111423986 14:01:10.111423986 False 13 SPY 76.50 19740813 15:26:44.457459450 15:26:44.457459450 False 14 SPY 63.47 20050106 18:13:57.982489010 18:13:57.982489010 False ... ... ... ... ... ... 
85 SPY 2.28 19930706 00:24:05.337093375 00:24:05.337093375 True 86 AAPL 95.86 20140823 11:35:14.816318096 11:35:14.816318096 False 87 AMZN 48.23 20070929 22:49:10.456157805 22:49:10.456157805 False 88 SPY 78.27 19930616 20:30:27.490477141 20:30:27.490477141 False 89 GME 8.27 19860626 07:48:16.756213658 07:48:16.756213658 True 90 TSLA 48.67 20060824 19:29:19.583638324 19:29:19.583638324 False 91 GME 49.07 19751026 20:29:32.616225869 20:29:32.616225869 False 92 GME 93.78 19911222 14:53:30.879285646 14:53:30.879285646 False 93 AMZN 57.17 19970715 20:26:36.179803660 20:26:36.179803660 False 94 GME 47.35 19961214 10:26:16.609357094 10:26:16.609357094 False 95 AMZN 26.70 19830606 14:02:30.699183111 14:02:30.699183111 False 96 AMZN 33.16 19821114 05:56:13.504071773 05:56:13.504071773 True 97 SPY 52.07 19740606 03:47:03.798827481 03:47:03.798827481 True 98 SPY 43.89 19881226 22:19:55.209671459 22:19:55.209671459 False 99 AAPL 2.16 19840720 11:51:26.734190049 11:51:26.734190049 False And of course, we can use the Boolean array to filter the Dataset:: >>> ds.filter(ds.TradesBefore10am) # Symbol Size TradeDateTime TradeTime TradesBefore10am --- ------ ----- --------------------------- ------------------ ---------------- 0 TSLA 66.49 20081111 04:15:24.079385833 04:15:24.079385833 True 1 TSLA 70.52 20190419 06:21:31.197889103 06:21:31.197889103 True 2 GME 78.07 19861112 05:20:14.239289462 05:20:14.239289462 True 3 SPY 56.87 19780303 03:19:32.676920289 03:19:32.676920289 True 4 AAPL 11.45 19840723 04:08:10.118105881 04:08:10.118105881 True 5 TSLA 66.84 19940814 03:08:03.730164619 03:08:03.730164619 True 6 SPY 55.36 20010615 00:14:45.718385740 00:14:45.718385740 True 7 GME 23.39 19751116 06:06:50.777397710 06:06:50.777397710 True 8 TSLA 29.36 19920606 01:44:12.762930709 01:44:12.762930709 True 9 GME 66.19 20150907 07:56:58.291001076 07:56:58.291001076 True 10 GME 46.19 19771105 07:18:54.592658284 07:18:54.592658284 True 11 SPY 50.10 19980211 08:39:58.366644251 08:39:58.366644251 
True 12 AAPL 15.23 19840811 03:03:32.341618015 03:03:32.341618015 True 13 AMZN 38.10 19730321 08:49:53.629495873 08:49:53.629495873 True 14 AAPL 30.15 20091103 04:56:46.941815206 04:56:46.941815206 True ... ... ... ... ... ... 19 GME 75.85 19870605 00:16:50.617990376 00:16:50.617990376 True 20 AAPL 43.21 19880730 01:20:25.325405869 01:20:25.325405869 True 21 AAPL 64.98 19750705 03:28:57.626851689 03:28:57.626851689 True 22 AAPL 41.58 19900712 07:39:20.866244793 07:39:20.866244793 True 23 SPY 4.16 20090512 03:17:20.112309966 03:17:20.112309966 True 24 AMZN 32.99 20010910 02:18:44.384567415 02:18:44.384567415 True 25 AMZN 14.45 19901004 00:53:54.407173923 00:53:54.407173923 True 26 TSLA 10.34 19961220 04:54:14.777983172 04:54:14.777983172 True 27 SPY 58.76 20070922 04:55:14.156355503 04:55:14.156355503 True 28 TSLA 92.51 19851209 01:52:03.199471749 01:52:03.199471749 True 29 GME 34.69 20160202 09:57:41.083925341 09:57:41.083925341 True 30 SPY 2.28 19930706 00:24:05.337093375 00:24:05.337093375 True 31 GME 8.27 19860626 07:48:16.756213658 07:48:16.756213658 True 32 AMZN 33.16 19821114 05:56:13.504071773 05:56:13.504071773 True 33 SPY 52.07 19740606 03:47:03.798827481 03:47:03.798827481 True If we only want to see certain columns of the Dataset, we can combine the filter with slicing:: >>> ds[ds.TradesBefore10am, ['Symbol', 'Size']] # Symbol Size --- ------ ----- 0 TSLA 66.49 1 TSLA 70.52 2 GME 78.07 3 SPY 56.87 4 AAPL 11.45 5 TSLA 66.84 6 SPY 55.36 7 GME 23.39 8 TSLA 29.36 9 GME 66.19 10 GME 46.19 11 SPY 50.10 12 AAPL 15.23 13 AMZN 38.10 14 AAPL 30.15 ... ... ... 
19 GME 75.85 20 AAPL 43.21 21 AAPL 64.98 22 AAPL 41.58 23 SPY 4.16 24 AMZN 32.99 25 AMZN 14.45 26 TSLA 10.34 27 SPY 58.76 28 TSLA 92.51 29 GME 34.69 30 SPY 2.28 31 GME 8.27 32 AMZN 33.16 33 SPY 52.07 Or if we just want the total size of AAPL trades before 10am:: >>> aapl10 = (ds.Symbol == 'AAPL') & (ds.TradesBefore10am) >>> ds.Size.nansum(filter = aapl10) 274.92741837733035 Other Useful things to Do with TimeSpans ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We can compare two ``DateTimeNano`` columns to find times that are close together – for example, those less than 10ms apart. To illustrate this, we’ll create some randomly generated small ``TimeSpan``\ s to add to our column of ``DateTimeNano``\ s:: >>> # Create TimeSpans from 1 millisecond to 19 milliseconds >>> some_ms = rt.TimeSpan(rng.integers(low=1, high=20, size=N), 'ms') >>> # Offset the TimeSpans in our original DateTimeNano >>> ds.TradeDateTime2 = ds.TradeDateTime + some_ms >>> ds.col_filter(['Symbol', 'TradeDateTime', 'TradeDateTime2']).head() # Symbol TradeDateTime TradeDateTime2 -- ------ --------------------------- --------------------------- 0 AAPL 20100614 01:47:46.306210225 20100614 01:47:46.313210225 1 SPY 20131004 12:02:28.251037257 20131004 12:02:28.267037257 2 SPY 19721212 00:54:12.641763127 19721212 00:54:12.642763127 3 TSLA 19720118 19:33:36.911790260 19720118 19:33:36.929790260 4 TSLA 19750331 15:04:15.847968984 19750331 15:04:15.858968984 5 GME 19740912 18:18:46.660464416 19740912 18:18:46.663464416 6 AAPL 19820906 09:31:02.911852383 19820906 09:31:02.917852383 7 SPY 19900810 10:42:02.603793160 19900810 10:42:02.614793160 8 AMZN 19870318 06:54:30.389382275 19870318 06:54:30.395382275 9 AAPL 20031029 09:53:06.898676308 20031029 09:53:06.901676308 10 TSLA 20160319 00:33:40.035581577 20160319 00:33:40.048581577 11 GME 19801024 01:38:46.310440408 19801024 01:38:46.323440408 12 SPY 19791105 17:08:46.460502123 19791105 17:08:46.463502123 13 SPY 20110304 07:11:03.437823831 20110304 
07:11:03.443823831 14 SPY 20140303 01:58:10.917868743 20140303 01:58:10.922868743 15 SPY 19990514 19:33:06.261903491 19990514 19:33:06.274903491 16 TSLA 19840808 16:34:56.776803922 19840808 16:34:56.790803922 17 AAPL 19711222 11:39:46.898769893 19711222 11:39:46.912769893 18 GME 20090605 13:23:02.120390523 20090605 13:23:02.138390523 19 TSLA 19900227 19:36:40.067192555 19900227 19:36:40.082192555 Now we can find the trades that occurred within 10ms of each other, and again put the results into a new column for a spot check. >>> ds.Within10ms = (abs(ds.TradeDateTime.time_since_midnight() ... - ds.TradeDateTime2.time_since_midnight())) < rt.TimeSpan(10, 'ms') >>> ds.col_filter(['Symbol', 'TradeDateTime', 'TradeDateTime2', 'Within10ms']).head() # Symbol TradeDateTime TradeDateTime2 Within10ms -- ------ --------------------------- --------------------------- ---------- 0 AAPL 19771006 11:46:39.512132962 19771006 11:46:39.519132962 True 1 SPY 20000321 15:00:25.630646023 20000321 15:00:25.646646023 False 2 SPY 19720130 05:36:37.195744004 19720130 05:36:37.196744004 True 3 TSLA 19960902 00:45:11.619930786 19960902 00:45:11.637930786 False 4 TSLA 19901216 15:52:53.935112408 19901216 15:52:53.946112408 False 5 GME 19900910 22:20:09.846455444 19900910 22:20:09.849455444 True 6 AAPL 20000825 20:59:19.248822244 20000825 20:59:19.254822244 True 7 SPY 19740216 18:32:16.051989951 19740216 18:32:16.062989951 False 8 AMZN 19951222 07:27:43.668483372 19951222 07:27:43.674483372 True 9 AAPL 20180708 11:19:48.016609690 20180708 11:19:48.019609690 True 10 TSLA 20110429 21:11:34.789939106 20110429 21:11:34.802939106 False 11 GME 19921202 20:27:45.957970537 19921202 20:27:45.970970537 False 12 SPY 19980801 10:04:29.793513895 19980801 10:04:29.796513895 True 13 SPY 19970217 08:00:06.615346852 19970217 08:00:06.621346852 True 14 SPY 20060915 20:18:28.369763536 20060915 20:18:28.374763536 True 15 SPY 19991220 16:10:56.841720714 19991220 16:10:56.854720714 False 16 TSLA 19730131 
01:08:43.413049524 19730131 01:08:43.427049524 False 17 AAPL 20040518 15:53:50.561136824 20040518 15:53:50.575136824 False 18 GME 19710809 14:51:55.347200052 19710809 14:51:55.365200052 False 19 TSLA 19980613 01:40:56.278221632 19980613 01:40:56.293221632 False And again we can use the result as a mask array:: >>> ds[ds.Within10ms, ['Symbol', 'Size']] # Symbol Size --- ------ ----- 0 AAPL 19.99 1 SPY 78.69 2 GME 78.07 3 AAPL 45.89 4 AMZN 13.98 5 AAPL 11.45 6 SPY 56.52 7 SPY 76.50 8 SPY 63.47 9 TSLA 21.46 10 AMZN 40.85 11 SPY 28.14 12 TSLA 29.36 13 GME 66.19 14 TSLA 55.70 ... ... ... 37 TSLA 49.40 38 TSLA 10.34 39 SPY 58.76 40 GME 17.06 41 GME 34.69 42 SPY 59.09 43 SPY 2.28 44 AAPL 95.86 45 GME 8.27 46 GME 49.07 47 GME 93.78 48 AMZN 33.16 49 SPY 52.07 50 SPY 43.89 51 AAPL 2.16 A common situation is having dates as date strings and times in nanos since midnight. You can use some arithmetic to build a DateTimeNano: ``Date`` + ``TimeSpan`` = ``DateTimeNano``:: >>> ds = rt.Dataset({ ... 'Date': ['20111111', '20200202', '20220222'], ... 'Time': [44_275_000_000_000, 39_287_000_000_000, 55_705_000_000_000] ... }) >>> # Convert the date strings to rt.Date objects >>> ds.Date = rt.Date(ds.Date) >>> # Convert the times to rt.TimeSpan objects >>> ds.Time = rt.TimeSpan(ds.Time) >>> ds # Date Time - ---------- ------------------ 0 2011-11-11 12:17:55.000000000 1 2020-02-02 10:54:47.000000000 2 2022-02-22 15:28:25.000000000 At this point, you might want to simply add ``ds.Date`` and ``ds.Time`` to get a ``DateTimeNano``:: >>> ds.DateTime = ds.Date + ds.Time >>> ds # Date Time DateTime - ---------- ------------------ --------------------------- 0 2011-11-11 12:17:55.000000000 20111111 12:17:55.000000000 1 2020-02-02 10:54:47.000000000 20200202 10:54:47.000000000 2 2022-02-22 15:28:25.000000000 20220222 15:28:25.000000000 And that seems to work. However, remember that ``DateTimeNano``\ s need to have a time zone. 
Here, GMT was assumed:: >>> ds.DateTime DateTimeNano(['20111111 12:17:55.000000000', '20200202 10:54:47.000000000', '20220222 15:28:25.000000000'], to_tz='GMT') Specify your desired time zone so you don’t end up with unexpected results down the line:: >>> ds.DateTime2 = rt.DateTimeNano((ds.Date + ds.Time), from_tz='NYC') >>> ds.DateTime2 DateTimeNano(['20111111 12:17:55.000000000', '20200202 10:54:47.000000000', '20220222 15:28:25.000000000'], to_tz='NYC') Warning: Given that ``TimeSpan + Date = DateTimeNano``, and also that you can use ``rt.Date(my_dtn)`` to get a ``Date`` from a ``DateTimeNano``, you might reasonably think you can get the ``TimeSpan`` from a ``DateTimeNano`` using ``rt.TimeSpan(my_dtn)``. However, that result includes the number of days since January 1, 1970. To get the ``TimeSpan`` from a ``DateTimeNano``, use ``time_since_midnight()`` instead. +----------------------------------------+ | **Datetime Arithmetic** | +========================================+ | Date + Date = TypeError | +----------------------------------------+ | Date + DateTimeNano = TypeError | +----------------------------------------+ | Date + DateSpan = Date | +----------------------------------------+ | Date + TimeSpan = DateTimeNano | +----------------------------------------+ | | +----------------------------------------+ | Date - Date = DateSpan | +----------------------------------------+ | Date - DateSpan = Date | +----------------------------------------+ | Date - DateTimeNano = TimeSpan | +----------------------------------------+ | Date - TimeSpan = DateTimeNano | +----------------------------------------+ | | +----------------------------------------+ | DateTimeNano - DateTimeNano = TimeSpan | +----------------------------------------+ | DateTimeNano - TimeSpan = DateTimeNano | +----------------------------------------+ | DateTimeNano + TimeSpan = DateTimeNano | +----------------------------------------+ | | +----------------------------------------+ | TimeSpan - 
TimeSpan = TimeSpan | +----------------------------------------+ | TimeSpan + TimeSpan = TimeSpan | +----------------------------------------+ Next, we’ll look at Riptable’s vehicle for group operations: `Perform Group Operations with Categoricals <tutorial_categoricals.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_datetimes.rst
0.929648
0.815857
tutorial_datetimes.rst
pypi
Merge Datasets ============== Merging gives you more flexibility to bring data from different Datasets together. A merge operation connects rows in Datasets using a “key” column that the Datasets have in common. Riptable’s two main Dataset merge functions are ``merge_lookup()`` and ``merge_asof()``. Generally speaking, ``merge_lookup()`` aligns data based on identical keys, while ``merge_asof()`` aligns data based on the nearest key. For more general merges, ``merge2()`` does database-style left, right, inner, and outer joins. ``merge_lookup()`` ------------------ Let’s start with ``merge_lookup()``. It’s common to have one Dataset that has most of the information you need, and another, usually smaller Dataset that has information you want to add to the first Dataset to enrich it. Here we’ll create a larger Dataset with symbols and size values, and a smaller Dataset that has symbols associated with trader names. We’ll use the shared Symbol column as the key to add the trader info to the larger Dataset:: >>> rng = np.random.default_rng(seed=42) >>> N = 25 >>> # Larger Dataset >>> ds = rt.Dataset({'Symbol': rng.choice(['GME', 'AMZN', 'TSLA', 'SPY'], N), ... 'Size': rng.integers(1, 1000, N),}) >>> # Smaller Dataset, with data used to enrich the larger Dataset >>> ds_symbol_trader = rt.Dataset({'Symbol': ['GME', 'TSLA', 'SPY', 'AMZN'], ... 'Trader': ['Nate', 'Elon', 'Josh', 'Dan']}) >>> ds.head() # Symbol Size -- ------ ---- 0 GME 644 1 SPY 403 2 TSLA 822 3 AMZN 545 4 AMZN 443 5 SPY 451 6 GME 228 7 TSLA 93 8 GME 555 9 GME 888 10 TSLA 64 11 SPY 858 12 TSLA 827 13 SPY 277 14 TSLA 632 15 SPY 166 16 TSLA 758 17 GME 700 18 SPY 355 19 AMZN 68 >>> ds_symbol_trader # Symbol Trader - ------ ------ 0 GME Nate 1 TSLA Elon 2 SPY Josh 3 AMZN Dan ``merge_lookup()`` with Key Columns That Have the Same Name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now we’ll use ``merge_lookup()`` to add the trader information to the larger Dataset. 
``merge_lookup()`` will align the data based on exact matches in the shared Symbol column. A note about terms: When you merge two Datasets, the Dataset you’re merging data into is the *left Dataset*; the one you’re getting data from is the *right Dataset*. Here, we call ``merge_lookup()`` on our left Dataset, ``ds``. We pass it the name of the right Dataset, and tell it what column to use as the key:: >>> ds.merge_lookup(ds_symbol_trader, on='Symbol') # Symbol Size Trader -- ------ ---- ------ 0 GME 644 Nate 1 SPY 403 Josh 2 TSLA 822 Elon 3 AMZN 545 Dan 4 AMZN 443 Dan 5 SPY 451 Josh 6 GME 228 Nate 7 TSLA 93 Elon 8 GME 555 Nate 9 GME 888 Nate 10 TSLA 64 Elon 11 SPY 858 Josh 12 TSLA 827 Elon 13 SPY 277 Josh 14 TSLA 632 Elon 15 SPY 166 Josh 16 TSLA 758 Elon 17 GME 700 Nate 18 SPY 355 Josh 19 AMZN 68 Dan 20 TSLA 970 Elon 21 AMZN 446 Dan 22 GME 893 Nate 23 SPY 678 Josh 24 SPY 778 Josh The left Dataset now has the trader information, correctly aligned. You can also use the following syntax, passing ``merge_lookup()`` the names of the left and right Datasets, along with the key:: >>> rt.merge_lookup(ds, ds_symbol_trader, on='Symbol') # Symbol Size Trader -- ------ ---- ------ 0 GME 644 Nate 1 SPY 403 Josh 2 TSLA 822 Elon 3 AMZN 545 Dan 4 AMZN 443 Dan 5 SPY 451 Josh 6 GME 228 Nate 7 TSLA 93 Elon 8 GME 555 Nate 9 GME 888 Nate 10 TSLA 64 Elon 11 SPY 858 Josh 12 TSLA 827 Elon 13 SPY 277 Josh 14 TSLA 632 Elon 15 SPY 166 Josh 16 TSLA 758 Elon 17 GME 700 Nate 18 SPY 355 Josh 19 AMZN 68 Dan 20 TSLA 970 Elon 21 AMZN 446 Dan 22 GME 893 Nate 23 SPY 678 Josh 24 SPY 778 Josh ``merge_lookup`` with Key Columns That Have Different Names ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the key column has a different name in each Dataset, just specify each column name with ``left_on`` and ``right_on``:: >>> # For illustrative purposes, rename the key column in the right Dataset. 
>>> ds_symbol_trader.col_rename('Symbol', 'UnderlyingSymbol') >>> ds.merge_lookup(ds_symbol_trader, left_on='Symbol', right_on='UnderlyingSymbol') # Symbol Size UnderlyingSymbol Trader -- ------ ---- ---------------- ------ 0 GME 644 GME Nate 1 SPY 403 SPY Josh 2 TSLA 822 TSLA Elon 3 AMZN 545 AMZN Dan 4 AMZN 443 AMZN Dan 5 SPY 451 SPY Josh 6 GME 228 GME Nate 7 TSLA 93 TSLA Elon 8 GME 555 GME Nate 9 GME 888 GME Nate 10 TSLA 64 TSLA Elon 11 SPY 858 SPY Josh 12 TSLA 827 TSLA Elon 13 SPY 277 SPY Josh 14 TSLA 632 TSLA Elon 15 SPY 166 SPY Josh 16 TSLA 758 TSLA Elon 17 GME 700 GME Nate 18 SPY 355 SPY Josh 19 AMZN 68 AMZN Dan 20 TSLA 970 TSLA Elon 21 AMZN 446 AMZN Dan 22 GME 893 GME Nate 23 SPY 678 SPY Josh 24 SPY 778 SPY Josh Notice that when the key columns have different names, both are kept. If you want to keep only certain columns from the left or right Dataset, you can specify them with ``columns_left`` or ``columns_right``:: >>> ds.merge_lookup(ds_symbol_trader, left_on='Symbol', right_on='UnderlyingSymbol', ... columns_right='Trader') # Symbol Size Trader -- ------ ---- ------ 0 GME 644 Nate 1 SPY 403 Josh 2 TSLA 822 Elon 3 AMZN 545 Dan 4 AMZN 443 Dan 5 SPY 451 Josh 6 GME 228 Nate 7 TSLA 93 Elon 8 GME 555 Nate 9 GME 888 Nate 10 TSLA 64 Elon 11 SPY 858 Josh 12 TSLA 827 Elon 13 SPY 277 Josh 14 TSLA 632 Elon 15 SPY 166 Josh 16 TSLA 758 Elon 17 GME 700 Nate 18 SPY 355 Josh 19 AMZN 68 Dan 20 TSLA 970 Elon 21 AMZN 446 Dan 22 GME 893 Nate 23 SPY 678 Josh 24 SPY 778 Josh Note: ``merge_lookup()`` Keeps Only the Keys in the Left Dataset ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ One thing to note about ``merge_lookup()`` is that it keeps only the keys that are in the left Dataset (it’s equivalent to a SQL left join). If there are keys in the right Dataset that aren’t in the left Dataset, they’re discarded in the merged data:: >>> # Create a right Dataset with an extra symbol key ('MSFT'). 
>>> ds_symbol_trader2 = rt.Dataset({'Symbol': ['GME', 'TSLA', 'SPY', 'AMZN', 'MSFT'], ... 'Trader': ['Nate', 'Elon', 'Josh', 'Dan', 'Lauren']}) >>> # Change 'UnderlyingSymbol' back to 'Symbol' for simplicity. >>> ds_symbol_trader.col_rename('UnderlyingSymbol', 'Symbol') >>> ds.merge_lookup(ds_symbol_trader2, on='Symbol', columns_right='Trader') # Symbol Size Trader -- ------ ---- ------ 0 GME 644 Nate 1 SPY 403 Josh 2 TSLA 822 Elon 3 AMZN 545 Dan 4 AMZN 443 Dan 5 SPY 451 Josh 6 GME 228 Nate 7 TSLA 93 Elon 8 GME 555 Nate 9 GME 888 Nate 10 TSLA 64 Elon 11 SPY 858 Josh 12 TSLA 827 Elon 13 SPY 277 Josh 14 TSLA 632 Elon 15 SPY 166 Josh 16 TSLA 758 Elon 17 GME 700 Nate 18 SPY 355 Josh 19 AMZN 68 Dan 20 TSLA 970 Elon 21 AMZN 446 Dan 22 GME 893 Nate 23 SPY 678 Josh 24 SPY 778 Josh ``merge_lookup()`` with Overlapping Columns That Aren’t Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As we saw above, if the two key columns have the same name in both Datasets, only one is kept. For columns that aren’t used as keys, you’ll get a name collision error when you try to merge:: >>> # Add a Size column to the right Dataset >>> ds_symbol_trader.Size = rng.integers(1, 1000, 4) >>> try: ... rt.merge_lookup(ds, ds_symbol_trader, on='Symbol') ... except ValueError as e: ... 
print("ValueError:", e) ValueError: columns overlap but no suffix specified: {'Size'} If you want to keep both columns, add a suffix to each column name to disambiguate them:: >>> rt.merge_lookup(ds, ds_symbol_trader, on='Symbol', suffixes=('_1', '_2')) # Symbol Size_1 Trader Size_2 -- ------ ------ ------ ------ 0 GME 644 Nate 760 1 SPY 403 Josh 364 2 TSLA 822 Elon 195 3 AMZN 545 Dan 467 4 AMZN 443 Dan 467 5 SPY 451 Josh 364 6 GME 228 Nate 760 7 TSLA 93 Elon 195 8 GME 555 Nate 760 9 GME 888 Nate 760 10 TSLA 64 Elon 195 11 SPY 858 Josh 364 12 TSLA 827 Elon 195 13 SPY 277 Josh 364 14 TSLA 632 Elon 195 15 SPY 166 Josh 364 16 TSLA 758 Elon 195 17 GME 700 Nate 760 18 SPY 355 Josh 364 19 AMZN 68 Dan 467 20 TSLA 970 Elon 195 21 AMZN 446 Dan 467 22 GME 893 Nate 760 23 SPY 678 Josh 364 24 SPY 778 Josh 364 ``merge_lookup()`` with a Right Dataset That Has Duplicate Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the right Dataset has more than one match for a unique key in the left Dataset, you can specify whether to use the first or the last match encountered in the right Dataset:: >>> # Create a right Dataset with a second GME key, associated to Lauren >>> ds_symbol_trader3 = rt.Dataset({'Symbol': ['GME', 'TSLA', 'SPY', 'AMZN', 'GME'], ... 
'Trader': ['Nate', 'Elon', 'Josh', 'Dan', 'Lauren']}) >>> ds_symbol_trader3 # Symbol Trader - ------ ------ 0 GME Nate 1 TSLA Elon 2 SPY Josh 3 AMZN Dan 4 GME Lauren We’ll keep the last match:: >>> ds.merge_lookup(ds_symbol_trader3, on='Symbol', columns_right='Trader', keep='last') # Symbol Size Trader -- ------ ---- ------ 0 GME 644 Lauren 1 SPY 403 Josh 2 TSLA 822 Elon 3 AMZN 545 Dan 4 AMZN 443 Dan 5 SPY 451 Josh 6 GME 228 Lauren 7 TSLA 93 Elon 8 GME 555 Lauren 9 GME 888 Lauren 10 TSLA 64 Elon 11 SPY 858 Josh 12 TSLA 827 Elon 13 SPY 277 Josh 14 TSLA 632 Elon 15 SPY 166 Josh 16 TSLA 758 Elon 17 GME 700 Lauren 18 SPY 355 Josh 19 AMZN 68 Dan 20 TSLA 970 Elon 21 AMZN 446 Dan 22 GME 893 Lauren 23 SPY 678 Josh 24 SPY 778 Josh ``merge_asof()`` ---------------- In a ``merge_asof()``, Riptable matches on the nearest key rather than an equal key. This is useful for merges based on keys that are times, where the times in one Dataset are not an exact match for the times in another Dataset, but they’re close enough to be used to merge the data. Note: To most efficiently find the nearest match, ``merge_asof()`` requires both key columns to be sorted. The key columns must also be numeric, such as a datetime, integer, or float. You can check whether a column is sorted with ``issorted()``, or just sort it using ``sort_inplace()``. (If the key columns aren’t sorted, Riptable will give you an error when you try to merge.) With ``merge_asof()``, you need to specify how you want to find the closest match: - ``direction='forward'`` matches based on the closest key in the right Dataset that’s greater than the key in the left Dataset. - ``direction='backward'`` matches based on the closest key in the right Dataset that’s less than the key in the left Dataset. - ``direction='nearest'`` matches based on the closest key in the right Dataset, regardless of whether it’s greater than or less than the key in the left Dataset. Let’s see an example based on closest times. 
The left Dataset has three trades and their times. The right Dataset has spot prices and times that are not all exact matches. We’ll merge the spot prices from the right Dataset by getting the values associated with the nearest earlier times. >>> # Left Dataset with trades and times >>> ds = rt.Dataset({'Symbol': ['AAPL', 'AMZN', 'AAPL'], ... 'Venue': ['A', 'I', 'A'], ... 'Time': rt.TimeSpan(['09:30', '10:00', '10:20'])}) >>> # Right Dataset with spot prices and nearby times >>> spot_ds = rt.Dataset({'Symbol': ['AMZN', 'AMZN', 'AMZN', 'AAPL', 'AAPL', 'AAPL'], ... 'Spot Price': [2000.0, 2025.0, 2030.0, 500.0, 510.0, 520.0], ... 'Time': rt.TimeSpan(['09:30', '10:00', '10:25', '09:25', '10:00', '10:25'])}) >>> ds # Symbol Venue Time - ------ ----- ------------------ 0 AAPL A 09:30:00.000000000 1 AMZN I 10:00:00.000000000 2 AAPL A 10:20:00.000000000 >>> spot_ds # Symbol Spot Price Time - ------ ---------- ------------------ 0 AMZN 2,000.00 09:30:00.000000000 1 AMZN 2,025.00 10:00:00.000000000 2 AMZN 2,030.00 10:25:00.000000000 3 AAPL 500.00 09:25:00.000000000 4 AAPL 510.00 10:00:00.000000000 5 AAPL 520.00 10:25:00.000000000 Note that an as-of merge requires the ``on`` columns to be sorted. Before the merge, the ``on`` columns are always checked. If they're not sorted, by default they are sorted before the merge; the original order is then restored in the returned merged Dataset. If you don't need to preserve the existing ordering, it's faster to sort the ``on`` columns in place first:: >>> spot_ds.sort_inplace('Time') # Symbol Spot Price Time - ------ ---------- ------------------ 0 AAPL 500.00 09:25:00.000000000 1 AMZN 2,000.00 09:30:00.000000000 2 AMZN 2,025.00 10:00:00.000000000 3 AAPL 510.00 10:00:00.000000000 4 AAPL 520.00 10:25:00.000000000 5 AMZN 2,030.00 10:25:00.000000000 Now we can merge based on the nearest earlier time. But not just any nearest earlier time – we want to make sure it’s the nearest earlier time associated with the same symbol. 
We use the optional ``by`` parameter to make sure we match on the symbol before getting the nearest earlier time. We'll also use the ``matched_on`` argument to show which key in ``spot_ds`` was matched on:: >>> ds.merge_asof(spot_ds, on='Time', by='Symbol', direction='backward', matched_on=True) # Symbol Time Venue Spot Price matched_on - ------ ------------------ ----- ---------- ------------------ 0 AAPL 09:30:00.000000000 A 500.00 09:25:00.000000000 1 AMZN 10:00:00.000000000 I 2,025.00 10:00:00.000000000 2 AAPL 10:20:00.000000000 A 510.00 10:00:00.000000000 We can see that both AAPL trades were matched based on the nearest earlier time. Merge based on the nearest later time:: >>> ds.merge_asof(spot_ds, on='Time', by='Symbol', direction='forward', matched_on=True) # Symbol Time Venue Spot Price matched_on - ------ ------------------ ----- ---------- ------------------ 0 AAPL 09:30:00.000000000 A 510.00 10:00:00.000000000 1 AMZN 10:00:00.000000000 I 2,025.00 10:00:00.000000000 2 AAPL 10:20:00.000000000 A 520.00 10:25:00.000000000 Both AAPL trades were matched based on the nearest later time. Here, we get the spot price associated with whatever time is nearest, whether it’s earlier or later:: >>> ds.merge_asof(spot_ds, on='Time', by='Symbol', direction='nearest', matched_on=True) # Symbol Time Venue Spot Price matched_on - ------ ------------------ ----- ---------- ------------------ 0 AAPL 09:30:00.000000000 A 500.00 09:25:00.000000000 1 AMZN 10:00:00.000000000 I 2,025.00 10:00:00.000000000 2 AAPL 10:20:00.000000000 A 520.00 10:25:00.000000000 For the first AAPL trade, the nearest time is earlier. For the second AAPL trade, the nearest time is later. We won’t spend time on examples of ``merge2()``, which is Riptable’s more general merge function that does database-style joins (left, right, inner, outer). Check out the API Reference for details. 
Next, we’ll briefly cover a couple of ways to change the shape of your Dataset: `Reshape Data with Pivot and Transpose <tutorial_reshape.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_merge.rst
0.855036
0.716343
tutorial_merge.rst
pypi
Riptable Categoricals -- Filtering ********************************** .. currentmodule:: riptable Categoricals that use base-1 indexing can be filtered when they're created or anytime afterwards. Filters can also be applied on a one-off basis at the time of an operation. Values or entire categories can be filtered. Filtered items are mapped to 0 in the integer mapping array and omitted from operations. On this page: - `Filtering at Categorical creation`_ - `Filtering after Categorical creation`_ - `Filter an operation on a Categorical`_ - `Set a name for filtered values`_ - `See the name set for filtered values`_ Filtering at Categorical creation --------------------------------- Provide a ``filter`` argument to filter values at Categorical creation. Filtered values are omitted from all operations on the Categorical. Notes: - Only base-1 indexing is supported -- the 0 is reserved for Filtered values. - You can't use a dictionary or :py:class:`~enum.IntEnum` to create a Categorical with a filter. You can filter out certain values or an entire category:: >>> f = rt.FA([True, True, False, True, True, True, True]) # The mask must be an array, not a list. >>> c = rt.Categorical(["a", "a", "b", "a", "c", "c", "b"], filter=f) # One "b" value is filtered. >>> c Categorical([a, a, Filtered, a, c, c, b]) Length: 7 FastArray([1, 1, 0, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c.count() *key_0 Count ------ ----- a 3 b 1 c 2 In the example below, an entire category is filtered. If the Categorical is constructed from values without provided categories, categories that are entirely filtered out do not appear in the array of unique categories or in the results of operations:: >>> vals = rt.FA(["a", "a", "b", "a", "c", "c", "b"]) >>> f = (vals != "b") # Filter out all "b" values. 
>>> c = rt.Categorical(vals, filter=f) >>> c Categorical([a, a, Filtered, a, c, c, Filtered]) Length: 7 FastArray([1, 1, 0, 1, 2, 2, 0], dtype=int8) Base Index: 1 FastArray([b'a', b'c'], dtype='|S1') Unique count: 2 >>> c.count() *key_0 Count ------ ----- a 3 c 2 If categories are provided, entirely filtered-out categories do appear in the array of unique categories and the results of operations:: >>> c = rt.Categorical(vals, categories=["a", "b", "c"], filter=f) >>> c Categorical([a, a, Filtered, a, c, c, Filtered]) Length: 7 FastArray([1, 1, 0, 1, 3, 3, 0], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c.count() *key_0 Count ------ ----- a 3 b 0 c 2 Multi-key Categoricals can also be filtered at creation. >>> f = rt.FA([False, False, True, False, True, True]) >>> vals1 = rt.FastArray(["a", "b", "b", "a", "b", "a"]) >>> vals2 = rt.FastArray([2, 1, 1, 3, 2, 1]) >>> rt.Categorical([vals1, vals2], filter=f) Categorical([Filtered, Filtered, (b, 1), Filtered, (b, 2), (a, 1)]) Length: 6 FastArray([0, 0, 1, 0, 2, 3], dtype=int8) Base Index: 1 {'key_0': FastArray([b'b', b'b', b'a'], dtype='|S1'), 'key_1': FastArray([1, 2, 1])} Unique count: 3 Categoricals using base-0 indexing can't be filtered at creation:: >>> f = rt.FA([False, False, True, False, True, True, False]) >>> try: ... rt.Categorical([0, 1, 1, 2, 2, 0, 1], base_index=0, filter=f) ... except ValueError as e: ... print("ValueError:", e) ValueError: Filtering is not allowed for base index 0. Use base-1 indexing instead. Categoricals created using a dictionary or :py:class:`~enum.IntEnum` can't be filtered by passing a `filter` argument at creation, but a Filtered category can be included by using the integer sentinel value as the Filtered mapping code. They can also be filtered after creation using `set_valid()`. 
Using the `filter` argument gets an error:: >>> f = rt.FA([True, False, False, False, False]) >>> d = {44: "StronglyAgree", 133: "Agree", 75: "Disagree", 1: "StronglyDisagree", 144: "NeitherAgreeNorDisagree" } >>> codes = [1, 44, 144, 133, 75] >>> try: ... rt.Categorical(codes, categories=d, filter=f) ... except TypeError as e: ... print("TypeError:", e) TypeError: Grouping from enum does not support pre-filtering. However, you can include a Filtered category by using the integer sentinel value in your mapping:: >>> d = {-2147483648: "Filtered", 44: "StronglyAgree", 133: "Agree", 75: "Disagree", 1: "StronglyDisagree", 144: "NeitherAgreeNorDisagree" } >>> codes = [-2147483648, 44, 144, 133, 75] >>> c = rt.Categorical(codes, categories=d) >>> c Categorical([Filtered, StronglyAgree, NeitherAgreeNorDisagree, Agree, Disagree]) Length: 5 FastArray([-2147483648, 44, 144, 133, 75]) Base Index: None {-2147483648:'Filtered', 44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 5 >>> from enum import IntEnum >>> class LikertDecision(IntEnum): ... # A Likert scale with the typical five-level Likert item format. ... Filtered = -2147483648 ... StronglyAgree = 44 ... Agree = 133 ... Disagree = 75 ... StronglyDisagree = 1 ... NeitherAgreeNorDisagree = 144 >>> codes = [-2147483648, 1, 44, 144, 133, 75] >>> rt.Categorical(codes, categories=LikertDecision) Categorical([Filtered, StronglyDisagree, StronglyAgree, NeitherAgreeNorDisagree, Agree, Disagree]) Length: 6 FastArray([-2147483648, 1, 44, 144, 133, 75]) Base Index: None {-2147483648:'Filtered', 44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 6 You can also filter an existing category after creation using `~riptable.rt_categorical.Categorical.set_valid` (see below). 
Filtering after Categorical creation ------------------------------------ Calling `~riptable.rt_categorical.Categorical.set_valid` on a Categorical returns a filtered copy of the Categorical. >>> c = rt.Categorical(["a", "a", "b", "a", "c", "c", "b"]) >>> c Categorical([a, a, b, a, c, c, b]) Length: 7 FastArray([1, 1, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> f = rt.FA([True, True, False, True, True, True, True]) # Filter out 1 "b" value. >>> c.set_valid(f) Categorical([a, a, Filtered, a, c, c, b]) Length: 7 FastArray([1, 1, 0, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 The original Categorical isn't modified:: >>> c Categorical([a, a, b, a, c, c, b]) Length: 7 FastArray([1, 1, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Entirely filtered-out bins are removed from the array of unique categories:: >>> vals = rt.FA(["a", "a", "b", "a", "c", "c", "b"]) >>> f = (vals != "b") # Filter out all "b" values. >>> c.set_valid(f) Categorical([a, a, Filtered, a, c, c, Filtered]) Length: 7 FastArray([1, 1, 0, 1, 2, 2, 0], dtype=int8) Base Index: 1 FastArray([b'a', b'c'], dtype='|S1') Unique count: 2 A Categorical created with a mapping dictionary or :py:class:`~enum.IntEnum` can be filtered after creation. Filtered values are mapped to the integer sentinel value:: >>> d = {44: "StronglyAgree", 133: "Agree", 75: "Disagree", 1: "StronglyDisagree", 144: "NeitherAgreeNorDisagree" } >>> codes = [1, 44, 144, 133, 75] >>> c = rt.Categorical(codes, categories=d) >>> c Categorical([StronglyDisagree, StronglyAgree, NeitherAgreeNorDisagree, Agree, Disagree]) Length: 5 FastArray([ 1, 44, 144, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 5 >>> f = rt.FA([False, True, True, True, True]) # Filter out 1: "StronglyDisagree". 
>>> c.set_valid(f) Categorical([Filtered, StronglyAgree, NeitherAgreeNorDisagree, Agree, Disagree]) Length: 5 FastArray([-2147483648, 44, 144, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 144:'NeitherAgreeNorDisagree', -2147483648:'Filtered'} Unique count: 5 >>> class LikertDecision(IntEnum): ... # A Likert scale with the typical five-level Likert item format. ... StronglyAgree = 44 ... Agree = 133 ... Disagree = 75 ... StronglyDisagree = 1 ... NeitherAgreeNorDisagree = 144 >>> codes = [1, 44, 144, 133, 75] >>> c = rt.Categorical(codes, categories=LikertDecision) >>> c Categorical([StronglyDisagree, StronglyAgree, NeitherAgreeNorDisagree, Agree, Disagree]) Length: 5 FastArray([ 1, 44, 144, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 5 >>> f = rt.FA([False, True, True, True, True]) # Filter out 1: "StronglyDisagree". >>> c.set_valid(f) Categorical([Filtered, StronglyAgree, NeitherAgreeNorDisagree, Agree, Disagree]) Length: 5 FastArray([-2147483648, 44, 144, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 144:'NeitherAgreeNorDisagree', -2147483648:'Filtered'} Unique count: 5 Filtering can be useful to re-index a Categorical so only its occurring uniques are shown:: >>> f = (vals != "b") >>> c2 = c[f] >>> c2 Categorical([a, a, a, c, c]) Length: 5 FastArray([1, 1, 1, 3, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c2.sum(rt.arange(5)) *key_0 col_0 ------ ----- a 3 b 0 c 7 >>> # Use set_valid to create a re-indexed Categorical:. 
>>> c3 = c2.set_valid() >>> c3 Categorical([a, a, a, c, c]) Length: 5 FastArray([1, 1, 1, 2, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'c'], dtype='|S1') Unique count: 2 >>> c3.count() *key_0 Count ------ ----- a 3 c 2 >>> c3.sum(rt.arange(5)) *key_0 col_0 ------ ----- a 3 c 7 Filter an operation on a Categorical ------------------------------------ To filter one operation (such as a sum), use the ``filter`` argument for the operation. Filtered results are omitted, but any entirely filtered categories still appear in the results:: >>> # Put the Categorical in a Dataset to better see >>> # the associated values used in the operation. >>> ds = rt.Dataset() >>> vals = rt.FA(["a", "a", "b", "a", "c", "c", "b"]) >>> c = rt.Categorical(vals) >>> ds.cats = c >>> ds.ints = rt.arange(7) >>> ds # cats ints - ---- ---- 0 a 0 1 a 1 2 b 2 3 a 3 4 c 4 5 c 5 6 b 6 >>> f = rt.FA([True, True, False, True, True, True, True]) # One "b" value is filtered. >>> c.sum(ds.ints, filter=f) *key_0 ints ------ ---- a 4 b 6 c 9 >>> f = (ds.cats != "b") # Filter out all "b" values. >>> c.sum(ds.ints, filter=f) *key_0 ints ------ ---- a 4 b 0 c 9 The Categorical doesn't retain the filter:: >>> c Categorical([a, a, b, a, c, c, b]) Length: 7 FastArray([1, 1, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 To see the results of the operation applied to all Filtered values (irrespective of their group), use the ``showfilter`` argument:: >>> # A "b" value (2) and a "c" value (5) are filtered. >>> f = rt.FA([True, True, False, True, True, False, True]) >>> c.sum(ds.ints, filter=f, showfilter=True) *key_0 ints -------- ---- Filtered 7 a 4 b 6 c 4 >>> f = (ds.cats != "a") # Filter out all "a" values. 
>>> c.sum(ds.ints, filter=f, showfilter=True) *key_0 ints -------- ---- Filtered 4 a 0 b 8 c 9 Set a name for filtered values ------------------------------ You can set a string for displaying filtered values using `~riptable.rt_categorical.Categorical.filtered_set_name`:: >>> vals = rt.FA(["a", "a", "b", "a", "c", "c", "b"]) >>> f = (vals != "b") >>> c = rt.Categorical(vals, filter=f) >>> c.filtered_set_name("FNAME") >>> c Categorical([a, a, FNAME, a, c, c, FNAME]) Length: 7 FastArray([1, 1, 0, 1, 2, 2, 0], dtype=int8) Base Index: 1 FastArray([b'a', b'c'], dtype='|S1') Unique count: 2 See the name set for filtered values ------------------------------------ To see the string used when filtered values are displayed, use the `~riptable.rt_categorical.Categorical.filtered_name` property:: >>> c.filtered_name 'FNAME'
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/categoricals_user_guide_filters.rst
0.905953
0.697673
categoricals_user_guide_filters.rst
pypi
Riptable Categoricals -- Invalid Categories ******************************************* .. currentmodule:: riptable A category set to be invalid at Categorical creation is considered to be NaN in the sense that `~riptable.rt_categorical.Categorical.isnan` returns `True` for the category, but it's mapped to a valid index and not excluded from any operations on the Categorical. To exclude values or categories from operations, use the ``filter`` argument. Note that this behavior differs from `Previous invalid behavior`_. Warning: If the invalid category isn't in the provided list of unique categories and a filter is also provided at Categorical creation, the invalid category also becomes Filtered. Categorical created from values (no user-provided categories) ------------------------------------------------------------- Because it's assigned to a regular bin, an invalid category is allowed for base-0 and base-1 indexing:: >>> c = rt.Categorical(["b", "a", "a", "Inv", "c", "a", "b"], invalid="Inv", base_index=0) >>> c Categorical([b, a, a, Inv, c, a, b]) Length: 7 FastArray([2, 1, 1, 0, 3, 1, 2]) Base Index: 0 FastArray([b'Inv', b'a', b'b', b'c'], dtype='|S3') Unique count: 4 >>> c.isnan() FastArray([False, False, False, True, False, False, False]) >>> c = rt.Categorical(['b', 'a', 'Inv', 'a'], invalid='Inv') >>> c Categorical([b, a, Inv, a]) Length: 4 FastArray([3, 2, 1, 2], dtype=int8) Base Index: 1 FastArray([b'Inv', b'a', b'b'], dtype='|S3') Unique count: 3 >>> c.isnan() FastArray([False, False, True, False]) Categorical created from values and user-provided categories ------------------------------------------------------------ If an invalid category is specified, it must also be in the list of unique categories, otherwise an error is raised:: >>> # Included. 
>>> c = rt.Categorical(["b", "a", "Inv", "a"], categories=["a", "b", "Inv"], invalid="Inv") >>> c Categorical([b, a, Inv, a]) Length: 4 FastArray([2, 1, 3, 1], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'Inv'], dtype='|S3') Unique count: 3 >>> # Not included. >>> try: ... rt.Categorical(["b", "a", "Inv", "a"], categories=["a", "b"], invalid="Inv") ... except ValueError as e: ... print("ValueError:", e) ValueError: Found values that were not in provided categories: [b'Inv']. The user-supplied categories (second argument) must also contain the invalid item Inv. For example: Categorical(['b','a','Inv','a'], ['a','b','Inv'], invalid='Inv') Categorical created with a filter --------------------------------- Be careful when mixing invalid categories and filters. If you filter an invalid category, it becomes Filtered and no longer invalid:: >>> c = rt.Categorical(["Inv", "a", "b", "a"], categories=["Inv", "a", "b"], ... filter=rt.FA([False, True, True, True]), invalid="Inv") >>> c Categorical([Filtered, a, b, a]) Length: 4 FastArray([0, 2, 3, 2], dtype=int8) Base Index: 1 FastArray([b'Inv', b'a', b'b'], dtype='|S3') Unique count: 3 >>> c.isnan() FastArray([False, False, False, False]) Warning: If the invalid category *isn't* included in the array of unique cagtegories and you *also* provide a filter, the invalid category *also becomes Filtered* even if it isn't filtered out directly. For comparison, here's an example where the invalid category *is* included in the list of unique categories and a filter is provided. You get a warning that doesn't apply in this case, and the filter is applied:: >>> c = rt.Categorical(["Inv", "a", "b", "a"], categories=["Inv", "a", "b"], ... filter=rt.FA([True, True, False, False]), invalid="Inv") UserWarning: Invalid category was set to Inv. If not in provided categories, will also appear as filtered. 
For example: print(Categorical(['a','a','b'], ['b'], filter=FA([True, True, False]), invalid='a')) -> Filtered, Filtered, Filtered The second two values are filtered:: >>> c Categorical([Inv, a, Filtered, Filtered]) Length: 4 FastArray([1, 2, 0, 0], dtype=int8) Base Index: 1 FastArray([b'Inv', b'a', b'b'], dtype='|S3') Unique count: 3 And the invalid category is still invalid:: >>> c.isnan() FastArray([ True, False, False, False]) However, when the invalid category *is not* included in the list of unique categories, the warning does apply, and the invalid category also becomes Filtered:: >>> c = rt.Categorical(["Inv", "a", "b", "a"], categories=["a", "b"], ... filter=rt.FA([True, True, False, False]), invalid="Inv") UserWarning: Invalid category was set to Inv. If not in provided categories, will also appear as filtered. For example: print(Categorical(['a','a','b'], ['b'], filter=FA([True, True, False]), invalid='a')) -> Filtered, Filtered, Filtered >>> c Categorical([Filtered, a, Filtered, Filtered]) Length: 4 FastArray([0, 1, 0, 0], dtype=int8) Base Index: 1 FastArray([b'a', b'b'], dtype='|S1') Unique count: 2 And "Inv" is no longer considered an invalid category:: >>> c.isnan() FastArray([False, False, False, False]) Invalid categories are not excluded from operations --------------------------------------------------- Although invalid categories are recognized by the Categorical `~riptable.rt_categorical.Categorical.isnan` method, they are not excluded from operations as filtered values and categories are. 
Here, "Inv" is invalid and the "b" category is filtered:: >>> vals = rt.FA(["Inv", "b", "a", "b", "c", "c", "Inv"]) >>> f = vals != "b" >>> c = rt.Categorical(vals, invalid="Inv", filter=f) >>> c Categorical([Inv, Filtered, a, Filtered, c, c, Inv]) Length: 7 FastArray([1, 0, 2, 0, 3, 3, 1], dtype=int8) Base Index: 1 FastArray([b'Inv', b'a', b'c'], dtype='|S3') Unique count: 3 >>> c.isnan() FastArray([ True, False, False, False, False, False, True]) Create some values to sum and put in a Dataset to see their relationsips to the catgegories:: >>> vals = rt.FA([1, 2, 3, 4, 5, 6, 7]) >>> ds = rt.Dataset({"c": c, "vals": vals}) >>> ds # c vals - -------- ---- 0 Inv 1 1 Filtered 2 2 a 3 3 Filtered 4 4 c 5 5 c 6 6 Inv 7 Get the ``nansum``:: >>> c.nansum(vals) *c vals --- ---- Inv 8 a 3 c 11 The ``showfilter`` argument confirms that only the "b" values were excluded:: >>> c.nansum(vals, showfilter=True) *c vals -------- ---- Filtered 6 Inv 8 a 3 c 11 If you use the ``filter`` argument with ``nansum`` and filter out an invalid, the filtered invalid value is excluded from the operation:: >>> # Filter the first Inv, one of the already-filtered "b"s, and the first "c". >>> f2 = rt.FA([False, False, True, True, True, False, True]) >>> c.nansum(vals, filter=f2, showfilter=True) *key_0 col_0 -------- ----- Filtered 13 Inv 7 a 3 c 5 If both invalid values are filtered by the ``nansum`` operation, the category still appears in the result:: >>> f3 = rt.FA([False, False, True, True, True, False, False]) >>> c.nansum(vals, filter=f3) *c vals --- ---- Inv 0 a 3 c 5 And both invalid values are still invalid:: >>> c.isnan() FastArray([ True, False, False, False, False, False, True]) Previous invalid behavior ------------------------- Previously, the specified string was used to represent an invalid catgegory when values missing in the categories list were encountered. The invalid category was mapped to 0 in the index/codes array. 
This is similar to how Pandas works, except that Pandas uses -1 for its NaN index:: >>> import pandas as pd >>> pdc = pd.Categorical(["a", "a", "z", "b", "c"], ["a", "b", "c"]) >>> pdc ['a', 'a', NaN, 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pdc._codes array([ 0, 0, -1, 1, 2], dtype=int8) >>> pd.Series([1, 1, 1, 1, 1]).groupby(pdc).count() a 2 b 1 c 1 dtype: int64
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/categoricals_user_guide_invalid_categories.rst
0.921305
0.747524
categoricals_user_guide_invalid_categories.rst
pypi
Riptable Categoricals -- Final dtype of Integer Mapping Array ************************************************************* .. currentmodule:: riptable Final dtype from provided mapping code/index array -------------------------------------------------- If the user provides the integer array of mapping codes, the array will not be recast unless: - The integer type is unsigned. If a large enough dtype is specified with the ``dtype`` argument, it will be used; otherwise the smallest possible dtype will be used based on the number of unique categories or the maximum value provided in a mapping. - A dtype is specified that's large enough to accommodate the provided codes. If the dtype isn't large enough, the array is upcast to the smallest possible dtype that can be used. Codes with a signed integer dtype:: >>> codes = rt.FastArray([2, 4, 4, 3, 2, 1, 3, 2, 0, 1, 3, 4, 2, 0, 4, ... 3, 1, 0, 1, 2, 3, 1, 4, 2, 2, 3, 4, 2, 0, 2], dtype=rt.int64) >>> cats = rt.FastArray(["a", "b", "c", "d", "e"]) It is unchanged:: >>> rt.Categorical(codes, categories=cats) Categorical([b, d, d, c, b, ..., c, d, b, Filtered, b]) Length: 30 FastArray([2, 4, 4, 3, 2, ..., 3, 4, 2, 0, 2], dtype=int64) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 The codes have an unsigned dtype. 
No ``dtype`` argument is provided, so the smallest dtype is found:: >>> c = rt.Categorical(codes.astype(rt.uint64), categories=cats) Categorical([b, d, d, c, b, ..., c, d, b, Filtered, b]) Length: 30 FastArray([2, 4, 4, 3, 2, ..., 3, 4, 2, 0, 2]) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 >>> c._fa.dtype dtype('int8') The codes have an unsigned dtype, and the specified dtype is large enough to be used:: >>> rt.Categorical(codes.astype(rt.uint8), categories=cats, dtype=rt.int64) Categorical([b, d, d, c, b, ..., c, d, b, Filtered, b]) Length: 30 FastArray([2, 4, 4, 3, 2, ..., 3, 4, 2, 0, 2], dtype=int64) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 The codes have a signed dtype, and a different dtype is specified that's large enough to accommodate the provided codes:: >>> rt.Categorical(codes.astype(rt.int16), categories=cats, dtype=rt.int64) Categorical([b, d, d, c, b, ..., c, d, b, Filtered, b]) Length: 30 FastArray([2, 4, 4, 3, 2, ..., 3, 4, 2, 0, 2], dtype=int64) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 The codes have a signed dtype, but the specified dtype is too small:: >>> big_cats = rt.FastArray(['string'+str(i) for i in range(2000)]) >>> rt.Categorical(codes, big_cats, dtype=rt.int8) UserWarning: A type of <class 'riptable.rt_numpy.int8'> was too small, upcasting. Categorical([string1, string3, string3, string2, string1, ..., string2, string3, string1, Filtered, string1]) Length: 30 FastArray([2, 4, 4, 3, 2, ..., 3, 4, 2, 0, 2], dtype=int16) Base Index: 1 FastArray([b'string0', b'string1', b'string2', b'string3', b'string4', ..., b'string1995', b'string1996', b'string1997', b'string1998', b'string1999'], dtype='|S10') Unique count: 2000 Final dtype from Matlab index array ----------------------------------- If the index array is from Matlab, it is often floating-point. 
Unless a dtype is specified, the smallest dtype will be found:: No dtype is specified; the smallest usable dtype is found:: >>> matlab_codes = (codes + 1).astype(rt.float32) >>> rt.Categorical(matlab_codes, categories=cats, from_matlab=True) Categorical([c, e, e, d, c, ..., d, e, c, a, c]) Length: 30 FastArray([3, 5, 5, 4, 3, ..., 4, 5, 3, 1, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 A dtype is specified that's large enough to be used:: >>> rt.Categorical(matlab_codes, categories=cats, from_matlab=True, dtype=rt.int64) Categorical([c, e, e, d, c, ..., d, e, c, a, c]) Length: 30 FastArray([3, 5, 5, 4, 3, ..., 4, 5, 3, 1, 3], dtype=int64) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 Final dtype from strings or strings + categories ------------------------------------------------ A new index array is generated. Unless a dtype is specified, the smallest usable dtype will be found. No dtype specified; the smallest usable dtype is found:: >>> str_fa = rt.FastArray(["c", "e", "e", "d", "c", "b", "d", "c", "a", "b", ... "d", "e", "c", "a", "e", "d", "b", "a", "b", "c", ... "d", "b", "e", "c", "c", "d", "e", "c", "a", "c"]) >>> rt.Categorical(str_fa) Categorical([c, e, e, d, c, ..., d, e, c, a, c]) Length: 30 FastArray([3, 5, 5, 4, 3, ..., 4, 5, 3, 1, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 A large enough dtype is specified:: >>> rt.Categorical(str_fa, dtype=rt.int64) Categorical([c, e, e, d, c, ..., d, e, c, a, c]) Length: 30 FastArray([3, 5, 5, 4, 3, ..., 4, 5, 3, 1, 3], dtype=int64) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 Final dtype from a multi-key Categorical ---------------------------------------- This follows the same rules as string construction. Unless a dtype is specified, the smallest usable dtype is found. 
No dtype specified; the smallest usable dtype is found:: >>> rt.Categorical([str_fa, codes]) Categorical([(c, 2), (e, 4), (e, 4), (d, 3), (c, 2), ..., (d, 3), (e, 4), (c, 2), (a, 0), (c, 2)]) Length: 30 FastArray([1, 2, 2, 3, 1, ..., 3, 2, 1, 5, 1], dtype=int8) Base Index: 1 {'key_0': FastArray([b'c', b'e', b'd', b'b', b'a'], dtype='|S1'), 'key_1': FastArray([2, 4, 3, 1, 0])} Unique count: 5 A large enough dtype is specified:: >>> rt.Categorical([str_fa, codes], dtype=rt.int64) Categorical([(c, 2), (e, 4), (e, 4), (d, 3), (c, 2), ..., (d, 3), (e, 4), (c, 2), (a, 0), (c, 2)]) Length: 30 FastArray([1, 2, 2, 3, 1, ..., 3, 2, 1, 5, 1], dtype=int64) Base Index: 1 {'key_0': FastArray([b'c', b'e', b'd', b'b', b'a'], dtype='|S1'), 'key_1': FastArray([2, 4, 3, 1, 0])} Unique count: 5 Final dtype from a Pandas Categorical ------------------------------------- Pandas already attempts to find the smallest dtype during Categorical construction. If a Riptable Categorical is created from a Pandas Categorical and a dtype is specified, Riptable uses the specified dtype. Construction from Pandas always generates a new array because Riptable adds 1 to the indices:: >>> import pandas as pd >>> pdc = pd.Categorical(str_fa) >>> pdc._codes array([2, 4, 4, 3, 2, 1, 3, 2, 0, 1, 3, 4, 2, 0, 4, 3, 1, 0, 1, 2, 3, 1, 4, 2, 2, 3, 4, 2, 0, 2], dtype=int8) >>> c = rt.Categorical(pdc) >>> c Categorical([c, e, e, d, c, ..., d, e, c, a, c]) Length: 30 FastArray([3, 5, 5, 4, 3, ..., 4, 5, 3, 1, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5 >>> c = rt.Categorical(pdc, dtype=rt.int32) Categorical([c, e, e, d, c, ..., d, e, c, a, c]) Length: 30 FastArray([3, 5, 5, 4, 3, ..., 4, 5, 3, 1, 3]) Base Index: 1 FastArray([b'a', b'b', b'c', b'd', b'e'], dtype='|S1') Unique count: 5
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/categoricals_user_guide_dtype.rst
0.857976
0.720116
categoricals_user_guide_dtype.rst
pypi
Riptable Datasets, FastArrays, and Structs =================================================== What Is a Dataset? ------------------ A Dataset is a table of data that consists of a sequence of columns of the same length. It’s similar to a spreadsheet, a SQL table, a Pandas DataFrame or the data.frame in R. The Dataset is the workhorse of Riptable. .. figure:: rt_dataset.svg :alt: A Dataset is a sequence of columns that are the same length. Each column in a Dataset consists of a key (also referred to as the column label, header, or name) and a series of values stored in a Riptable FastArray. A FastArray is a 1-dimensional array of values that are all the same data type, or dtype. .. figure:: rt_fastarray.svg :alt: A FastArray holds values that are the same dtype. Though each Dataset column has a single dtype, the Dataset overall can hold columns of various dtypes. Dataset rows are implicitly indexed by integer. You can select rows using their indices, but you can’t reindex rows or give them arbitrary labels. This restriction helps Riptable perform Dataset operations more efficiently. .. figure:: rt_dataset_indices.svg :alt: Dataset rows are implicitly indexed. Create a Dataset ---------------- Generally speaking, there are a few ways to create Riptable Datasets. You can convert a Python dictionary or use Riptable’s dictionary-style syntax, or create an empty Dataset and add arrays as columns. 
Convert a Python Dictionary to a Riptable Dataset ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you have a Python dictionary, it’s easy to convert it to a Riptable Dataset:: >>> my_dict = {'Column1': ['A', 'B', 'C', 'D'], 'Column2': [0, 1, 2, 3]} # Create a Python dictionary >>> ds = rt.Dataset(my_dict) # Convert it to a Riptable Dataset >>> ds # Column1 Column2 - ------- ------- 0 A 0 1 B 1 2 C 2 3 D 3 Another way to think of a Dataset is as a dictionary of same-length FastArrays, where each key is a column name that’s mapped to a FastArray of values that all have the same dtype. For Python dictionary details, see `Python’s documentation <https://docs.python.org/3/tutorial/datastructures.html#dictionaries>`__. Use the Dataset Constructor with Dictionary-Style Input ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``rt.Dataset()`` uses dictionary-style syntax:: >>> ds = rt.Dataset({'Column1': ['A', 'B', 'C', 'D'], 'Column2': [0.0, 1.0, 2.0, 3.0]}) >>> ds # Column1 Column2 - ------- ------- 0 A 0.00 1 B 1.00 2 C 2.00 3 D 3.00 Create an Empty Dataset and Add Columns to It ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can also create an empty dataset by using ``rt.Dataset()`` without any dictionary input ... :: >>> ds = rt.Dataset() ... and then add columns to it. Add Dataset Columns (FastArrays) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first column you add to the Dataset can be any length, but all future columns must match that length. The columns you add to the Dataset become aligned, meaning that they share the same row index. You can add a column to a Dataset using attribute assignment or dictionary-style syntax. Here, we use attribute assignment to create a column named ‘Column1’ that holds a list of values:: >>> ds.Column1 = [0.0, 1.0, 2.0, 3.0, 4.0] >>> ds # Column1 - ------- 0 0.00 1 1.00 2 2.00 3 3.00 4 4.00 The list becomes a FastArray. 
You can use attribute access to get the column’s data:: >>> ds.Column1 FastArray([0., 1., 2., 3., 4.]) Here, we use dictionary-style syntax to add a column of integers:: >>> ds['Ints'] = [1, 2, 3, 4, 5] >>> ds # Column1 Ints - ------- ---- 0 0.00 1 1 1.00 2 2 2.00 3 3 3.00 4 4 4.00 5 And we can use dictionary-style syntax to access column data:: >>> ds['Ints'] FastArray([1, 2, 3, 4, 5]) A Note About Column Names ~~~~~~~~~~~~~~~~~~~~~~~~~ Column names should meet Python’s rules for well-formed variable names. If a column name doesn’t meet these rules (for example, if it’s a procedurally generated name that starts with a symbol), you can’t refer to it or get its data using attribute access. For example, trying to access a column called #%&ColumnName with ``ds.#%&ColumnName`` will give you a syntax error. To access the column, you’ll need to use dictionary-style syntax: ``ds['#%&ColumnName']``. Python keywords and Riptable class methods are also restricted. If you’re not sure whether a column name is valid, you can use the Dataset method ``is_valid_colname()``. For example, ``for`` is invalid because it’s a Python keyword:: >>> ds.is_valid_colname('for') False And ``col_move`` is invalid because it’s a Dataset class method:: >>> ds.is_valid_colname('col_move') False You can see all restricted names with ``get_restricted_names``:: >>> # Limit and format the output. >>> print("Some of the restricted names include...\n") >>> print(", ".join(list(ds.get_restricted_names())[::10])) Some of the restricted names include... 
mask_or_isinf, __reduce_ex__, imatrix_xy, __weakref__, dtypes, _get_columns, from_arrow, elif, __imul__, _deleteitem, __rsub__, _index_from_row_labels, as_matrix, putmask, _as_meta_data, shape, cat, __invert__, try, _init_columns_as_dict, label_as_dict, col_str_replace, _replaceitem, label_set_names, __contains__, __floordiv__, _row_numbers, filter, __init__, sorts_on, flatten_undo, col_str_match, __dict__, size, __rand__, info, col_remove, as, or Add a NumPy Array as a Column ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you have a 1-dimensional NumPy array, you can add that as a column – it also will be converted to a FastArray:: >>> my_np_array = np.array([5.0, 6.0, 7.5, 8.5, 9.0]) >>> ds.NPArr = my_np_array >>> ds # Column1 Ints NPArr - ------- ---- ----- 0 0.00 1 5.00 1 1.00 2 6.00 2 2.00 3 7.50 3 3.00 4 8.50 4 4.00 5 9.00 Warning: Although you can technically convert a 2-dimensional (or higher) NumPy array to a multi-dimensional FastArray, multi-dimensional FastArrays aren’t supported and you could get unexpected results when you try to work with one:: >>> a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> a_fa = rt.FastArray(a) C:\\riptable\\rt_fastarray.py:561: UserWarning: FastArray contains two or more dimensions greater than one - shape:(3, 4). Problems may occur. warnings.warn(warning_string) If you don’t specify the dtype, Riptable makes its best guess:: >>> ds.Ints.dtype dtype('int32') If you want to specify the dtype, create a FastArray directly with the ``dtype`` parameter:: >>> ds.Floats = rt.FastArray([0, 1, 2, 3, 4], dtype=float) >>> ds # Column1 Ints NPArr Floats - ------- ---- ----- ------ 0 0.00 1 5.00 0.00 1 1.00 2 6.00 1.00 2 2.00 3 7.50 2.00 3 3.00 4 8.50 3.00 4 4.00 5 9.00 4.00 Tip: You can also create a FastArray using the shortcut ``rt.FA()``. 
If you add a column with a single value, the value is duplicated to fill every existing row:: >>> ds.Ones = 1 >>> ds # Column1 Ints NPArr Floats Ones - ------- ---- ----- ------ ---- 0 0.00 1 5.00 0.00 1 1 1.00 2 6.00 1.00 1 2 2.00 3 7.50 2.00 1 3 3.00 4 8.50 3.00 1 4 4.00 5 9.00 4.00 1 Instantiating a column with ones or zeros as placeholder data can be useful – see some options in the `Instantiate with Placeholder Values and Generate Sample Data <tutorial_sample_data.rst>`__ section. Get Basic Info About a Dataset ------------------------------ Datasets have attributes (sometimes also called properties) that give you information about them. To better see how they work, let’s create a slightly larger Dataset:: >>> rng = np.random.default_rng(seed=42) # Construct a random number generator >>> ds2 = rt.Dataset() >>> N = 50 >>> ds2.Symbol = rt.FA(np.random.choice(['AAPL', 'AMZN', 'TSLA', 'SPY', 'GME'], N)) >>> ds2.Size = rt.FA(np.random.choice([100, 200, 300, 400, 500], N)) >>> ds2.Value = rng.random(N) >>> ds2 # Symbol Size Value --- ------ ---- ----- 0 SPY 500 0.77 1 AMZN 500 0.44 2 AAPL 400 0.86 3 SPY 300 0.70 4 TSLA 300 0.09 5 SPY 400 0.98 6 GME 300 0.76 7 TSLA 500 0.79 8 AAPL 400 0.13 9 GME 500 0.45 10 SPY 300 0.37 11 SPY 400 0.93 12 TSLA 100 0.64 13 AMZN 100 0.82 14 SPY 400 0.44 ... ... ... ... 35 AMZN 400 0.19 36 GME 200 0.13 37 AMZN 400 0.48 38 SPY 500 0.23 39 TSLA 500 0.67 40 AMZN 100 0.44 41 AAPL 300 0.83 42 AAPL 400 0.70 43 AAPL 200 0.31 44 AAPL 300 0.83 45 TSLA 100 0.80 46 GME 500 0.39 47 AAPL 300 0.29 48 AAPL 200 0.68 49 GME 400 0.14 Use ``shape`` to get the Dataset’s dimensions returned as a tuple (rows, cols):: >>> ds2.shape (50, 3) See the dtypes of a Dataset (note the plural ``.dtypes`` vs. the singular ``.dtype`` for FastArrays):: >>> ds2.dtypes {'Symbol': dtype('S4'), 'Size': dtype('int32'), 'Value': dtype('float64')} Datasets also have methods that give you a feel for the data they contain. 
Useful methods for seeing quick subsets of your Dataset are ``head()``, ``tail()``, and ``sample()``. By default, ``head()`` and ``tail()`` show you the first or last 20 rows, respectively, while ``sample()`` shows you 10 rows randomly selected from the Dataset. For each, you can pass an argument to show a custom number of rows. The first 5 rows:: >>> ds2.head(5) # Symbol Size Value - ------ ---- ----- 0 SPY 500 0.77 1 AMZN 500 0.44 2 AAPL 400 0.86 3 SPY 300 0.70 4 TSLA 300 0.09 The last 10 rows:: >>> ds2.tail(10) # Symbol Size Value - ------ ---- ----- 0 AMZN 100 0.44 1 AAPL 300 0.83 2 AAPL 400 0.70 3 AAPL 200 0.31 4 AAPL 300 0.83 5 TSLA 100 0.80 6 GME 500 0.39 7 AAPL 300 0.29 8 AAPL 200 0.68 9 GME 400 0.14 If the first or last rows aren’t representative of your data, it can be preferable to use ``sample``:: >>> ds2.sample() # Symbol Size Value - ------ ---- ----- 0 GME 300 0.76 1 SPY 400 0.44 2 AMZN 100 0.83 3 TSLA 400 0.76 4 SPY 200 0.97 5 GME 100 0.15 6 SPY 400 0.97 7 AMZN 500 0.37 8 AMZN 400 0.19 9 AAPL 200 0.68 For numerical data, ``describe()`` gives you summary statistics. 
Non-numerical columns are ignored:: >>> ds2.describe() *Stats Size Value ------ ------ ----- Count 50.00 50.00 Valid 50.00 50.00 Nans 0.00 0.00 Mean 302.00 0.54 Std 142.13 0.28 Min 100.00 0.04 P10 100.00 0.14 P25 200.00 0.32 P50 300.00 0.52 P75 400.00 0.78 P90 500.00 0.86 Max 500.00 0.98 MeanM 302.38 0.54 For each numerical column, ``describe()`` provides these summary statistics: =============== ============================== **Calculation** **Description** =============== ============================== Count Total number of items Valid Total number of valid values Nans Total number of NaN values* Mean Mean Std Standard deviation Min Minimum value P10 10th percentile P25 25th percentile P50 50th percentile P75 75th percentile P90 90th percentile Max Maximum value MeanM Mean without top or bottom 10% =============== ============================== \*NaN stands for Not a Number, and is commonly used to represent missing data. For details, see `Working with Missing Data <tutorial_missing_data.rst>`__. You can also use ``describe()`` on a single column:: >>> ds2.Value.describe() *Stats Value ------ ----- Count 50.00 Valid 50.00 Nans 0.00 Mean 0.54 Std 0.28 Min 0.04 P10 0.14 P25 0.32 P50 0.52 P75 0.78 P90 0.86 Max 0.98 MeanM 0.54 If your Dataset is very large, you can get column statistics with ``statx()``, which you can import from ``riptable.rt_stats``. 
``statx()`` provides rapid sampling and gives you a few more percentiles than ``describe()`` does, but it works only on one column at a time:: >>> from riptable.rt_stats import statx >>> statx(ds2.Value) Stat Value 0 min 0.043804 1 0.1% 0.044784 2 1% 0.053610 3 10% 0.138769 4 25% 0.315731 5 50% 0.515145 6 75% 0.777277 7 90% 0.862050 8 99% 0.973209 9 99.9% 0.975381 10 max 0.975622 11 Mean 0.535233 12 StdDev 0.277838 13 Count 50.000000 14 NaN_Count 0.000000 Other Useful Dataset Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See a column’s unique values:: >>> ds2.Symbol.unique() FastArray([b'AAPL', b'AMZN', b'GME', b'SPY', b'TSLA'], dtype='|S4') A note about strings in FastArrays: When you view a FastArray of strings, you’ll see a ‘b’ next to each string. These b's indicate that the strings are encoded to byte strings, which saves memory compared to saving strings as ASCII. Count the number of unique values in a column:: >>> ds2.Symbol.count() *Symbol Count ------- ----- AAPL 12 AMZN 12 GME 7 SPY 8 TSLA 11 Note that ``count()`` displays aggregated results. We’ll look more at Riptable’s structures and functions for aggregations later, when we cover Categoricals and Accums. 
View the Dataset as a dictionary:: >>> ds2.asdict() {'Symbol': FastArray([b'TSLA', b'SPY', b'GME', b'SPY', b'SPY', b'AAPL', b'AAPL', b'SPY', b'TSLA', b'AMZN', b'SPY', b'AMZN', b'AMZN', b'TSLA', b'GME', b'SPY', b'SPY', b'SPY', b'SPY', b'GME', b'AAPL', b'AAPL', b'TSLA', b'SPY', b'AMZN', b'TSLA', b'TSLA', b'AAPL', b'TSLA', b'SPY', b'GME', b'AAPL', b'SPY', b'AMZN', b'AAPL', b'AAPL', b'AMZN', b'TSLA', b'GME', b'AMZN', b'GME', b'AMZN', b'AAPL', b'AMZN', b'AAPL', b'AAPL', b'AMZN', b'GME', b'AAPL', b'AMZN'], dtype='|S4'), 'Size': FastArray([400, 100, 100, 300, 300, 400, 300, 300, 300, 200, 500, 500, 500, 400, 400, 100, 500, 400, 500, 200, 400, 500, 300, 200, 200, 500, 400, 100, 500, 500, 300, 300, 200, 300, 500, 200, 200, 500, 200, 300, 400, 200, 100, 500, 100, 400, 400, 200, 200, 400]), 'Value': FastArray([0.77395605, 0.43887844, 0.85859792, 0.69736803, 0.09417735, 0.97562235, 0.7611397 , 0.78606431, 0.12811363, 0.45038594, 0.37079802, 0.92676499, 0.64386512, 0.82276161, 0.4434142 , 0.22723872, 0.55458479, 0.06381726, 0.82763117, 0.6316644 , 0.75808774, 0.35452597, 0.97069802, 0.89312112, 0.7783835 , 0.19463871, 0.466721 , 0.04380377, 0.15428949, 0.68304895, 0.74476216, 0.96750973, 0.32582536, 0.37045971, 0.46955581, 0.18947136, 0.12992151, 0.47570493, 0.22690935, 0.66981399, 0.43715192, 0.8326782 , 0.7002651 , 0.31236664, 0.8322598 , 0.80476436, 0.38747838, 0.2883281 , 0.6824955 , 0.13975248])} Select Dataset Columns ---------------------- As mentioned above, you can access a Dataset column using attribute access (``ds.Column1``) or using dictionary-style syntax (``ds['Column1']``). 
To select multiple columns of a Dataset, pass a list of column names to ``col_filter()``:: >>> ds.col_filter(['Floats', 'Ones']) # Floats Ones - ------ ---- 0 0.00 1 1 1.00 1 2 2.00 1 3 3.00 1 4 4.00 1 ``col_filter()`` also accepts regular expressions:: >>> ds.col_filter(regex='Col*') # Column1 - ------- 0 0.00 1 1.00 2 2.00 3 3.00 4 4.00 For selecting subsets of columns, Riptable supports all of the indexing, slicing, and “fancy indexing” operations supported by NumPy arrays. Select a single value at index 0:: >>> ds.Column1[0] 0.0 Get a slice of contiguous values from index 1 (included) to index 4 (excluded):: >>> ds.Column1[1:4] FastArray([1., 2., 3.]) To use fancy indexing, pass an array that specifies noncontiguous indices and your desired ordering:: >>> ds.Floats[[1, 3, 0]] FastArray([1., 3., 0.]) You can also set values using indexing and slicing:: >>> ds.Column1[0] = 5.0 >>> ds.Ints[1:3] = 4 >>> ds.Floats[2:4] = 10.0, 20.0 >>> ds.Ones[[1, 3, 0]] = 2_000_000, 4_000_000, 5_000_000 # Underscores are nice for code readability! >>> ds # Column1 Ints NPArr Floats Ones - ------- ---- ----- ------ ------- 0 5.00 1 5.00 0.00 5000000 1 1.00 4 6.00 1.00 2000000 2 2.00 4 7.50 10.00 1 3 3.00 4 8.50 20.00 4000000 4 4.00 5 9.00 4.00 1 Warning: Trying to insert a floating-point value into a column/FastArray of integers will cause the floating-point value to be silently truncated:: >>> ds.Ones[0] = 1.5 >>> ds # Column1 Ints NPArr Floats Ones - ------- ---- ----- ------ ------- 0 5.00 1 5.00 0.00 1 1 1.00 4 6.00 1.00 2000000 2 2.00 4 7.50 10.00 1 3 3.00 4 8.50 20.00 4000000 4 4.00 5 9.00 4.00 1 To learn more about accessing data using indexing and slicing, see examples for 1-dimensional NumPy ndarrays in `NumPy’s documentation <https://numpy.org/doc/stable/user/index.html>`__. Select Dataset Rows ------------------- To select Dataset rows, you need to also specify which columns you want. 
First row, Column1:: >>> ds[0, 'Column1'] 5.0 You can also refer to columns by number:: >>> ds[0, 0] 5.0 The ``:`` specifies all columns:: >>> ds[0:3, :] # Column1 Ints NPArr Floats Ones - ------- ---- ----- ------ ------- 0 5.00 1 5.00 0.00 1 1 1.00 4 6.00 1.00 2000000 2 2.00 4 7.50 10.00 1 Or you can pass a list of multiple columns:: >>> ds[0:2, ['Ints', 'Ones']] # Ints Ones - ---- ------- 0 1 1 1 4 2000000 More often, you’ll probably use filters to get subsets of your data. That's covered in more detail in `Get and Operate on Subsets of Data Using Filters <tutorial_filters.rst>`__. Perform Operations on Dataset Columns ------------------------------------- FastArrays are a subclass of NumPy’s ndarray. Thanks to this, you can do anything with FastArrays that you can do with NumPy arrays. In particular, NumPy’s universal functions (ufuncs) are supported, allowing for fast, vectorized operations. (Vectorized functions operate element-wise on arrays without using Python loops, which are slow.) See the `NumPy API Reference <https://numpy.org/doc/stable/reference/index.html>`__ for a complete list and documentation for all NumPy methods. Note, though, that Riptable has implemented its own optimized version of many NumPy methods. If you call a NumPy method that’s been optimized by Riptable, the Riptable method is called. We encourage you to call the Riptable method directly to avoid any confusion about what method is being called. See `NumPy Methods Optimized by Riptable <tutorial_numpy_rt.rst>`__ for details. If a method hasn’t been optimized by Riptable, the NumPy method is called. Arithmetic on Column Values ~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can do various arithmetic operations on any numerical column (or standalone FastArray) and optionally put the results into a new column. Binary operations on two columns are performed on an element-by-element basis. 
The columns must be the same length:: >>> ds3 = rt.Dataset() >>> ds3.A = [0, 1, 2] >>> ds3.B = [5, 5, 5] >>> ds3.C = ds3.A + ds3.B >>> ds3 # A B C - - - - 0 0 5 5 1 1 5 6 2 2 5 7 FastArrays also support broadcasting, which allows you to perform a binary operation on a FastArray and a scalar. For example, you can add a scalar to an array. Riptable will upcast data types as necessary to preserve information:: >>> ds3.D = ds3.A + 5.1 >>> ds3 # A B C D - - - - ---- 0 0 5 5 5.10 1 1 5 6 6.10 2 2 5 7 7.10 Note that the standard order of operations is respected:: >>> ds3.E = -(0.5*ds3.A + 1) ** 2 >>> ds3 # A B C D E - - - - ---- ----- 0 0 5 5 5.10 -1.00 1 1 5 6 6.10 -2.25 2 2 5 7 7.10 -4.00 You can populate a Dataset column with the results of an operation on a column of another Dataset, as long as the resulting FastArray is the right length for the Dataset you want to add it to:: >>> ds4 = rt.Dataset({'A': [10, 11, 12], 'B': [21, 22, 23]}) >>> ds3.F = ds4.A * 2 >>> ds3 # A B C D E F - - - - ---- ----- -- 0 0 5 5 5.10 -1.00 20 1 1 5 6 6.10 -2.25 22 2 2 5 7 7.10 -4.00 24 Delete a Column from a Dataset ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To delete a column from a Dataset, use ``del ds.ColumnName``. Reducing Operations vs. Non-Reducing Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The operations we’ve performed so far have been *non-reducing* operations. A non-reducing operation takes in multiple input values and returns one output value for each input value. That is, the resulting FastArray is the same length as the FastArray you operated on, and it can be added to the same Dataset. A *reducing* operation, on the other hand, takes in multiple inputs and returns one value. ``sum()`` and ``mean()`` are examples of reducing operations. This distinction will be more important when we talk about Categoricals and operations on grouped data. For now, we’ll get the results of two reducing operations without adding them to a Dataset. 
The total of the Size column:: >>> ds2.Size.sum() 15700 The average of the Value column:: >>> ds2.Value.mean() 0.5352327331104895 Tip: Many column operations can be called in two ways: as a method called on a FastArray (``ds2.Size.sum()``) or as a Riptable function with the column as the argument (``rt.sum(ds2.Size)``). Watch Out for Missing Values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When you’re working with real data, there will often be missing values. Take care when performing operations! In Riptable, missing floating-point values are represented by ``nan``. In a regular arithmetic operation with a floating-point ``nan``, the result is ``nan``:: >>> y = rt.FA([1.0, 2.0, 3.0, rt.nan]) >>> y.sum() nan Fortunately, many functions have “nan” versions that ignore ``nan`` values:: >>> y.nansum() 6.0 Useful NaN functions: +----------------------------+-----------------------------------------+ | **Function** | **Description** (all functions ignore | | | NaN values) | +============================+=========================================+ | nanmin(), nanmax() | Minimum and maximum | +----------------------------+-----------------------------------------+ | nanvar() | Variance | +----------------------------+-----------------------------------------+ | nanmean() | Mean | +----------------------------+-----------------------------------------+ | nanstd() | Standard deviation | +----------------------------+-----------------------------------------+ | nansum() | Total of all items | +----------------------------+-----------------------------------------+ | nanargmin(), nanargmax() | Index of the minimum or maximum value | +----------------------------+-----------------------------------------+ | rollingnansum(), | Rolling sum, rolling mean | | rollingnanmean() | | +----------------------------+-----------------------------------------+ Another way to deal with NaN values is to replace them with other values. 
For details, see `Working with Missing Data <tutorial_missing_data.rst>`__. Sort Column Values ~~~~~~~~~~~~~~~~~~ Sorting a column is straightforward. Use ``sort_copy()`` to return a sorted version of the array without modifying the original input, or ``sort_inplace()`` if you’re OK with modifying the original data:: >>> ds4 = rt.Dataset() >>> ds4.A = rng.choice(['AAPL', 'AMZN', 'TSLA', 'SPY', 'GME'], 10) >>> ds4.B = rng.integers(low=0, high=5, size=10) >>> ds4.C = rng.random(10) >>> ds4 # A B C - ---- - ---- 0 GME 1 0.67 1 AAPL 3 0.47 2 GME 2 0.57 3 AAPL 2 0.76 4 SPY 2 0.63 5 SPY 2 0.55 6 SPY 0 0.56 7 SPY 0 0.30 8 TSLA 1 0.03 9 SPY 0 0.44 You can sort by one column:: >>> ds4.sort_copy('A') # A B C - ---- - ---- 0 AAPL 2 0.76 1 AAPL 3 0.47 2 GME 1 0.67 3 GME 2 0.57 4 SPY 0 0.56 5 SPY 0 0.30 6 SPY 0 0.44 7 SPY 2 0.63 8 SPY 2 0.55 9 TSLA 1 0.03 Or by more than one column by passing an ordered list:: >>> ds4.sort_copy(['A', 'B']) # A B C - ---- - ---- 0 AAPL 2 0.76 1 AAPL 3 0.47 2 GME 1 0.67 3 GME 2 0.57 4 SPY 0 0.56 5 SPY 0 0.30 6 SPY 0 0.44 7 SPY 2 0.63 8 SPY 2 0.55 9 TSLA 1 0.03 With ``sort_copy()``, the original Dataset is not modified:: >>> ds4 # A B C - ---- - ---- 0 SPY 0 0.56 1 SPY 0 0.30 2 SPY 0 0.44 3 GME 1 0.67 4 TSLA 1 0.03 5 GME 2 0.57 6 AAPL 2 0.76 7 SPY 2 0.63 8 SPY 2 0.55 9 AAPL 3 0.47 Use ``sort_inplace()`` if you want to modify the original input (for example, if your data needs to be sorted by time, but isn’t):: >>> ds4.sort_inplace('B') # A B C - ---- - ---- 0 SPY 0 0.56 1 SPY 0 0.30 2 SPY 0 0.44 3 GME 1 0.67 4 TSLA 1 0.03 5 GME 2 0.57 6 AAPL 2 0.76 7 SPY 2 0.63 8 SPY 2 0.55 9 AAPL 3 0.47 Change the sort order by passing ``ascending=False``:: >>> ds4.sort_copy('A', ascending=False) # A B C - ---- - ---- 0 TSLA 1 0.03 1 SPY 2 0.55 2 SPY 2 0.63 3 SPY 0 0.44 4 SPY 0 0.30 5 SPY 0 0.56 6 GME 2 0.57 7 GME 1 0.67 8 AAPL 3 0.47 9 AAPL 2 0.76 Split Data into New Columns Using String Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sometimes 
related pieces of data come bundled together in a single string, and you want to break up the data into separate columns. For example, take a look at the OSI Symbol field commonly found in trading-related data. OSIs are the official name for a tradable option. They contain several pieces of information that are separated by colons. For example, in **AAPL:191018:260:0:C**: - AAPL is the underlying symbol - 191018 represents an expiration date of 2019-10-18 - 260 is the strike price dollar amount - The 0 is the strike price penny amount - Other possibilities: :0: for 0.00, :5: for 0.50, :3: for 0.30, :25: for 0.25, :15: for 0.15 - “C” indicates a call (“P” indicates a put) Here’s what OSI Symbols might look like in a Dataset. We’ll use ``str.extract()`` to break them into separate columns:: >>> ds5 = rt.Dataset( ... {'OSISymbol':['SPY:191003:187:0:C','SPY:191003:193:0:C','TLT:191003:135:5:P', ... 'AAPL:191018:260:0:C', 'AAPL:191018:265:0:P'], ... 'Delta':[.93, .71, -.72, .45, -.81], ... 'PnL':[1.03, 0.61, 0.52, -0.14, .68] ... }) >>> ds5 # OSISymbol Delta PnL - --------------- ----- ----- 0 SPY:191003:187: 0.93 1.03 1 SPY:191003:193: 0.71 0.61 2 TLT:191003:135: -0.72 0.52 3 AAPL:191018:260 0.45 -0.14 4 AAPL:191018:265 -0.81 0.68 ``str.extract()`` uses regular expressions to match patterns and capture/extract the subpatterns that are surrounded by parentheses. Each captured subpattern is returned in a separate column. Below, we define five capture groups that correspond to five returned columns of data. Inside the capture groups, we match any letters or numbers:: >>> ds5[['Symbol', 'Expiration', 'StrikeDollar', 'StrikePenny', ... 'PutCall']] = ds5.OSISymbol.str.extract('(.*):(.*):(.*):(.*):(.*)', ... 
names=['Symbol', 'Expiration', 'StrikeDollar', 'StrikePenny', 'PutCall']) >>> ds5 # OSISymbol Delta PnL Symbol Expiration StrikeDollar StrikePenny PutCall - --------------- ----- ----- ------ ---------- ------------ ----------- ------- 0 SPY:191003:187: 0.93 1.03 SPY 191003 187 0 C 1 SPY:191003:193: 0.71 0.61 SPY 191003 193 0 C 2 TLT:191003:135: -0.72 0.52 TLT 191003 135 5 P 3 AAPL:191018:260 0.45 -0.14 AAPL 191018 260 0 C 4 AAPL:191018:265 -0.81 0.68 AAPL 191018 265 0 P It’s not ideal to have the strike dollar and strike penny amounts in separate columns, so we’ll add a fix:: >>> ds5.Strike = (ds5.StrikeDollar + '.' + ds5.StrikePenny).astype('float') >>> del ds5.StrikeDollar >>> del ds5.StrikePenny >>> ds5 # OSISymbol Delta PnL Symbol Expiration PutCall Strike - --------------- ----- ----- ------ ---------- ------- ------ 0 SPY:191003:187: 0.93 1.03 SPY 191003 C 187.00 1 SPY:191003:193: 0.71 0.61 SPY 191003 C 193.00 2 TLT:191003:135: -0.72 0.52 TLT 191003 P 135.50 3 AAPL:191018:260 0.45 -0.14 AAPL 191018 C 260.00 4 AAPL:191018:265 -0.81 0.68 AAPL 191018 P 265.00 Hold Two or More Datasets in a Struct ------------------------------------- When you’re working with multiple Datasets, it can be helpful to keep them together in a Riptable Struct. Structs were created as a base class for Datasets. They also replicate Matlab structs. You can think of a Struct as a Python dictionary, but with attribute access allowed for keys. Data structures stored together in a Struct don’t need to be aligned:: >>> s = rt.Struct() >>> s.ds = ds >>> s.ds2 = ds2 You can access each data structure using attribute-style access. For example: >>> s.ds2 # Symbol Size Value --- ------ ---- ----- 0 AAPL 300 0.77 1 AMZN 100 0.44 2 AAPL 300 0.86 3 GME 500 0.70 4 SPY 100 0.09 5 AMZN 300 0.98 6 TSLA 200 0.76 7 SPY 300 0.79 8 TSLA 300 0.13 9 TSLA 300 0.45 10 AAPL 400 0.37 11 AAPL 400 0.93 12 AAPL 400 0.64 13 GME 100 0.82 14 AMZN 100 0.44 ... ... ... ... 
35 GME 200 0.19 36 TSLA 400 0.13 37 SPY 200 0.48 38 AMZN 500 0.23 39 GME 400 0.67 40 AAPL 300 0.44 41 SPY 100 0.83 42 TSLA 500 0.70 43 AAPL 500 0.31 44 AAPL 100 0.83 45 AAPL 200 0.80 46 AMZN 400 0.39 47 AMZN 500 0.29 48 AMZN 300 0.68 49 AMZN 400 0.14 Riptable has a few other methods for operating on strings. We'll use them as the basis for filtering data in the next section, `Get and Operate on Subsets of Data Using Filters <tutorial_filters.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_datasets.rst
0.910766
0.935876
tutorial_datasets.rst
pypi
Riptable Categoricals -- Sorting and Display Order ************************************************** .. currentmodule:: riptable Whether a Categorical's categories are lexicographically sorted or considered to be "ordered" as specified at creation depends on two parameters: ``ordered`` and ``lex``. - ``ordered`` controls whether categories are sorted lexicographically before they are mapped to integers. - ``lex`` controls whether hashing- or sorting-based logic is used to find unique values in the input array. Note that if ``lex=True``, the categories become sorted even if ``ordered=False``. Additionally, the results of groupby operations can be displayed in sorted order with the ``sort_gb`` parameter. The way these parameters interact depends on whether categories are provided when the Categorical is created. **If categories are not provided,** then if ``ordered=True`` (the default) or ``lex=True`` they are sorted in the Categorical and in groupby results, even if ``sort_gb=False``. If ``ordered=False`` and ``lex=False``, the categories are held in the order of first appearance, and groupby results are sorted only if ``sort_gb=True``. +---------+-----+--------------------+-------------------------+ | ordered | lex | categories sorted? | groupby results sorted? | +=========+=====+====================+=========================+ | T | T | Y | Y | +---------+-----+--------------------+-------------------------+ | T | F | Y | Y | +---------+-----+--------------------+-------------------------+ | F | T | Y | Y | +---------+-----+--------------------+-------------------------+ | F | F | N | only if sort_gb=True | +---------+-----+--------------------+-------------------------+ **If categories are provided,** they are always held in the same order. The ``ordered`` argument is ignored, and ``lex`` can't be specified. Groupby results can be displayed in sorted order with ``sort_gb=True``. 
Categorical created from values (no user-provided categories) ------------------------------------------------------------- With the default ``ordered=True`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Categories are sorted. - Groupby results are sorted regardless of whether ``sort_gb=True``. :: >>> vals = rt.arange(6) >>> c = rt.Categorical(["b", "a", "a", "c", "a", "b"]) Categorical([b, a, a, c, a, b]) Length: 6 FastArray([2, 1, 1, 3, 1, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c.sum(vals) *key_0 col_0 ------ ----- a 7 b 5 c 3 With ``ordered=False`` ^^^^^^^^^^^^^^^^^^^^^^ - Categories are not sorted unless ``lex=True``. Setting ``lex=True`` causes a lexicographical sort to be performed to find the uniques, and the Categorical's categories become sorted even if ``ordered=False``. - Groupby results are not displayed sorted unless ``lex=True`` or ``sort_gb=True``. :: >>> vals = rt.arange(6) >>> c = rt.Categorical(["b", "a", "a", "c", "a", "b"], ordered=False) >>> c Categorical([b, a, a, c, a, b]) Length: 6 FastArray([1, 2, 2, 3, 2, 1], dtype=int8) Base Index: 1 FastArray([b'b', b'a', b'c'], dtype='|S1') Unique count: 3 >>> c.sum(vals) *key_0 col_0 ------ ----- b 5 a 7 c 3 Here, ``lex=True`` causes the categories to become sorted even though ``ordered=False``. 
>>> c = rt.Categorical(["b", "a", "a", "c", "a", "b"], ordered=False, lex=True) >>> c Categorical([b, a, a, c, a, b]) Length: 6 FastArray([2, 1, 1, 3, 1, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c.sum(vals) *key_0 col_0 ------ ----- a 7 b 5 c 3 >>> c = rt.Categorical(["b", "a", "a", "c", "a", "b"], ordered=False, sort_gb=True) >>> c Categorical([b, a, a, c, a, b]) Length: 6 FastArray([1, 2, 2, 3, 2, 1], dtype=int8) Base Index: 1 FastArray([b'b', b'a', b'c'], dtype='|S1') Unique count: 3 >>> c.sum(vals) *key_0 col_0 ------ ----- a 7 b 5 c 3 Categorical created from values and user-provided categories (unsorted) ----------------------------------------------------------------------- - If categories are provided, they are always held in the same order. The ``ordered`` argument is ignored, and you can't set ``lex=True``. - Groupby results can be displayed in sorted order with ``sort_gb=True``. Categorical with unsorted categories:: >>> c = rt.Categorical(["b", "a", "a", "c", "a", "b"], categories=["b", "a", "c"]) >>> c Categorical([b, a, a, c, a, b]) Length: 6 FastArray([1, 2, 2, 3, 2, 1], dtype=int8) Base Index: 1 FastArray([b'b', b'a', b'c'], dtype='|S1') Unique count: 3 Groupby results are in the order of provided categories:: >>> vals = rt.arange(6) >>> c.sum(vals) *key_0 col_0 ------ ----- b 5 a 7 c 3 With provided categories, ``lex`` can't be set to `True`:: >>> try: ... rt.Categorical(["b", "a", "a", "c", "a", "b"], categories=["b", "a", "c"], lex=True) ... except TypeError as e: ... print("TypeError:", e) TypeError: Cannot bin using lexsort and user-suplied categories. 
With ``sort_gb=True``, categories are held in the order provided but displayed lexicographically sorted in groupby results:: >>> c = rt.Categorical(["b", "a", "a", "c", "a", "b"], categories=["b", "a", "c"], sort_gb=True) >>> c Categorical([b, a, a, c, a, b]) Length: 6 FastArray([1, 2, 2, 3, 2, 1], dtype=int8) Base Index: 1 FastArray([b'b', b'a', b'c'], dtype='|S1') Unique count: 3 >>> c.sum(vals) *key_0 col_0 ------ ----- a 7 b 5 c 3 If the categories are provided in a mapping dictionary or :py:class:`~enum.IntEnum`, the groupby results are in the order of the underlying mapping codes array unless ``sort_gb=True``:: >>> d = {"StronglyAgree": 44, "Agree": 133, "Disagree": 75, "StronglyDisagree": 1, "NeitherAgreeNorDisagree": 144 } >>> codes = [1, 44, 44, 133, 75] # Note duplication and missing entry. >>> c = rt.Categorical(codes, categories=d) >>> c Categorical([StronglyDisagree, StronglyAgree, StronglyAgree, Agree, Disagree]) Length: 5 FastArray([ 1, 44, 44, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 4 >>> vals = rt.arange(5) >>> ds = rt.Dataset({"c": c, "vals": vals}) >>> ds # c vals - ---------------- ---- 0 StronglyDisagree 0 1 StronglyAgree 1 2 StronglyAgree 2 3 Agree 3 4 Disagree 4 >>> c.sum(vals) *c vals --------------- ---- StronglyDisagre 0 StronglyAgree 3 Agree 3 Disagree 4 With ``sort_gb=True``, categories are displayed lexicographically sorted in groupby results:: >>> c = rt.Categorical(codes, categories=d, sort_gb=True) >>> c Categorical([StronglyDisagree, StronglyAgree, StronglyAgree, Agree, Disagree]) Length: 5 FastArray([ 1, 44, 44, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 4 >>> c.sum(vals) *key_0 vals --------------- ---- Agree 3 Disagree 4 StronglyAgree 3 StronglyDisagre 0 Ordering of results from rt.cut and rt.qcut operations 
------------------------------------------------------ With `cut` and `qcut`, when labels are provided they are held and displayed in the order of first appearance and are considered ordered in the context of logical comparisons:: >>> c = rt.cut(rt.arange(10), bins=3, labels=["z-label1", "y-label2", "x-label3"]) >>> c Categorical([z-label1, z-label1, z-label1, z-label1, y-label2, y-label2, y-label2, x-label3, x-label3, x-label3]) Length: 10 FastArray([1, 1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=int8) Base Index: 1 FastArray([b'z-label1', b'y-label2', b'x-label3'], dtype='|S8') Unique count: 3 >>> c.sum(rt.arange(10)) *key_0 col_0 -------- ----- z-label1 6 y-label2 15 x-label3 24 >>> c > "z-label1" FastArray([False, False, False, False, True, True, True, True, True, True]) See :doc:`Comparisons <categoricals_user_guide_comparisons>` for more examples.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/categoricals_user_guide_order.rst
0.907132
0.720208
categoricals_user_guide_order.rst
pypi
Riptable Categoricals -- Constructing ************************************* .. currentmodule:: riptable There are many ways to construct a Categorical -- here are some of the more common ones. On this page: - `From a list of strings`_ - `From a list of non-unique strings and a list of unique categories`_ - `From a list of numeric values that index into a list of string categories`_ - `From a list of numeric values with no categories provided`_ - `From an integer-string dictionary and an array of integer mapping codes`_ - `From an IntEnum and an array of integer mapping codes`_ - `From a list of arrays or a dictionary: a multi-key Categorical`_ - `From a list of float values (Matlab indexing)`_ - `From a Pandas Categorical`_ - `Using the categories of another Categorical`_ - `From an array of values using rt.cut or rt.qcut`_ From a list of strings ---------------------- A Categorical is typically created from a list of strings (unicode or byte strings). An array of integer mapping codes is created, along with an array of the unique categories:: >>> c = rt.Categorical(["b", "a", "b", "a", "c", "c", "b"]) >>> c Categorical([b, a, b, a, c, c, b]) Length: 7 FastArray([2, 1, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 By default, the integer mapping array uses base-1 indexing, with 0 reserved for Filtered values. From a list of non-unique strings and a list of unique categories ----------------------------------------------------------------- When categories are provided, they're always held in the same order, so you can preserve a non-lexicographical ordering. The provided categories don't need to represent all of the provided values -- note that the "xsmall" category has no index in the mapping array:: >>> rt.Categorical(["small", "small", "medium", "small", "large", "large", "medium"], ... 
categories=["xsmall", "small", "medium", "large"]) Categorical([small, small, medium, small, large, large, medium]) Length: 7 FastArray([2, 2, 3, 2, 4, 4, 3], dtype=int8) Base Index: 1 FastArray([b'xsmall', b'small', b'medium', b'large'], dtype='|S6') Unique count: 4 However, all of the values must appear in the provided categories, otherwise an error is raised:: >>> try: ... rt.Categorical(["small", "small", "medium", "small", "large", "large", "medium", "xlarge"], ... categories=["xsmall", "small", "medium", "large"]) ... except ValueError as e: ... print("ValueError:", e) ValueError: Found values that were not in provided categories: [b'xlarge'] From a list of numeric values that index into a list of string categories ------------------------------------------------------------------------- If you have an array of integers that indexes into an array of provided unique categories, the integers are used for the integer mapping array and the categories are held in the order provided. Because this is a base-1 Categorical, 0 is reserved for the Filtered category, and 1 and 2 are mapped to "small" and "medium", respectively:: >>> rt.Categorical([0, 1, 1, 2, 2, 0, 1, 1, 2, 1], categories=["small", "medium", "large"]) Categorical([Filtered, small, small, medium, medium, Filtered, small, small, medium, small]) Length: 10 FastArray([0, 1, 1, 2, 2, 0, 1, 1, 2, 1]) Base Index: 1 FastArray([b'small', b'medium', b'large'], dtype='|S6') Unique count: 3 You can set ``base_index=0`` to make the 0 not Filtered:: >>> rt.Categorical([0, 1, 1, 2, 2, 0, 1, 1, 2, 1], categories=["small", "medium", "large"], base_index=0) Categorical([small, medium, medium, large, large, small, medium, medium, large, medium]) Length: 10 FastArray([0, 1, 1, 2, 2, 0, 1, 1, 2, 1]) Base Index: 0 FastArray([b'small', b'medium', b'large'], dtype='|S6') Unique count: 3 From a list of numeric values with no categories provided --------------------------------------------------------- Note that when no 
categories are provided, the integer mapping codes start at 1 so that 0 values are not Filtered:: >>> rt.Categorical([10, 0, 0, 5, 5, 10, 0, 0, 5, 0]) Categorical([10, 0, 0, 5, 5, 10, 0, 0, 5, 0]) Length: 10 FastArray([3, 1, 1, 2, 2, 3, 1, 1, 2, 1], dtype=int8) Base Index: 1 FastArray([ 0, 5, 10]) Unique count: 3 From an integer-string dictionary and an array of integer mapping codes ----------------------------------------------------------------------- A dictionary can be used for the ``categories`` argument to provide a mapping between possibly non-consecutive or non-sequential mapping codes and strings. The dictionary can map integers to strings or string to integers. Provide a list of integer mapping codes as the first argument to the constructor (notice here that the provided codes have duplication and a missing entry):: >>> # Integer to string mapping. >>> d = {44: "StronglyAgree", 133: "Agree", 75: "Disagree", 1: "StronglyDisagree", 144: "NeitherAgreeNorDisagree" } >>> codes = [1, 44, 44, 133, 75] >>> rt.Categorical(codes, categories=d) Categorical([StronglyDisagree, StronglyAgree, StronglyAgree, Agree, Disagree]) Length: 5 FastArray([ 1, 44, 44, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 4 >>> # String to integer mapping. >>> d = {"StronglyAgree": 44, "Agree": 133, "Disagree": 75, "StronglyDisagree": 1, "NeitherAgreeNorDisagree": 144 } >>> codes = [1, 44, 44, 133, 75] >>> c = rt.Categorical(codes, categories=d) >>> c Categorical([StronglyDisagree, StronglyAgree, StronglyAgree, Agree, Disagree]) Length: 5 FastArray([ 1, 44, 44, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 4 Note that Categoricals created from a mapping dictionary have no base index. To see how this affects filtering, see the page on :doc:`Filters <categoricals_user_guide_filters>`. 
Also note that groupby results are displayed *not* in the order of the provided mapping dictionary, but the order of the underlying mapping codes array, unless you set ``sort_gb=True`` at Categorical creation:: >>> vals = rt.arange(5) >>> ds = rt.Dataset({"c": c, "vals": vals}) >>> ds # c vals - ---------------- ---- 0 StronglyDisagree 0 1 StronglyAgree 1 2 StronglyAgree 2 3 Agree 3 4 Disagree 4 >>> c.sum(vals) *c vals --------------- ---- StronglyDisagre 0 StronglyAgree 3 Agree 3 Disagree 4 See :doc:`Sorting and Display Order <categoricals_user_guide_order>` for examples. From an IntEnum and an array of integer mapping codes ----------------------------------------------------- Similar to a dictionary, a Python :py:class:`~enum.IntEnum` class defines a mapping between strings and possibly non-consecutive, non-sequential integer mapping codes. Similarly, the list of the integer codes is supplied as the first argument to the constructor, and the :py:class:`~enum.IntEnum` is provided as the ``categories`` argument:: >>> from enum import IntEnum >>> class LikertDecision(IntEnum): ... # A Likert scale with the typical five-level Likert item format. ... StronglyAgree = 44 ... Agree = 133 ... Disagree = 75 ... StronglyDisagree = 1 ... NeitherAgreeNorDisagree = 144 >>> codes = [1, 44, 44, 133, 75] >>> c = rt.Categorical(codes, categories=LikertDecision) >>> c Categorical([StronglyDisagree, StronglyAgree, StronglyAgree, Agree, Disagree]) Length: 5 FastArray([ 1, 44, 44, 133, 75]) Base Index: None {44:'StronglyAgree', 133:'Agree', 75:'Disagree', 1:'StronglyDisagree', 144:'NeitherAgreeNorDisagree'} Unique count: 4 As with Categoricals created from dictionaries, a Categorical created from an :py:class:`~enum.IntEnum` has no base index. To see how this affects filtering, see the page on :doc:`Filters <categoricals_user_guide_filters>`. 
Also similarly, aggregation results are displayed in the order of the mapping codes unless you set ``sort_gb=True`` at Categorical creation:: >>> c.sum(vals) *key_0 vals --------------- ---- StronglyDisagre 1 StronglyAgree 5 Agree 4 Disagree 5 See :doc:`Sorting and Display Order <categoricals_user_guide_order>` for examples. From a list of arrays or a dictionary: a multi-key Categorical -------------------------------------------------------------- Multi-key Categoricals let you create and operate on groupings based on multiple associated categories. The associated keys form a group:: >>> strs = rt.FastArray(["a", "b", "b", "a", "b", "a"]) >>> ints = rt.FastArray([2, 1, 1, 2, 1, 3]) >>> c = rt.Categorical([strs, ints]) # Create with a list of arrays. >>> c Categorical([(a, 2), (b, 1), (b, 1), (a, 2), (b, 1), (a, 3)]) Length: 6 FastArray([1, 2, 2, 1, 2, 3], dtype=int8) Base Index: 1 {'key_0': FastArray([b'a', b'b', b'a'], dtype='|S1'), 'key_1': FastArray([2, 1, 3])} Unique count: 3 >>> c.count() *key_0 *key_1 Count ------ ------ ----- a 2 2 b 1 3 a 3 1 >>> c2 = rt.Categorical({"Key1": strs, "Key2": ints}) # Create with a dict of key-value pairs. >>> c2 Categorical([(a, 2), (b, 1), (b, 1), (a, 2), (b, 1), (a, 3)]) Length: 6 FastArray([1, 2, 2, 1, 2, 3], dtype=int8) Base Index: 1 {'Key1': FastArray([b'a', b'b', b'a'], dtype='|S1'), 'Key2': FastArray([2, 1, 3])} Unique count: 3 >>> c2.count() *Key1 *Key2 Count ----- ----- ----- a 2 2 b 1 3 a 3 1 From a list of float values (Matlab indexing) --------------------------------------------- To convert a Matlab Categorical that uses float indices, set ``from_matlab=True``. 
The indices are converted to an integer type, and any 0.0 values are Filtered:: >>> rt.Categorical([0.0, 1.0, 2.0, 3.0, 1.0, 1.0], categories=["b", "c", "a"], from_matlab=True) Categorical([Filtered, b, c, a, b, b]) Length: 6 FastArray([0, 1, 2, 3, 1, 1], dtype=int8) Base Index: 1 FastArray([b'b', b'c', b'a'], dtype='|S1') Unique count: 3 From a Pandas Categorical ------------------------- Categoricals created from Pandas Categoricals must have a base-1 index to preserve invalid values. The invalid values become Filtered:: >>> import pandas as pd >>> pdc = pd.Categorical(["a", "a", "z", "b", "c"], ["c", "b", "a"]) >>> pdc ['a', 'a', NaN, 'b', 'c'] Categories (3, object): ['c', 'b', 'a'] >>> rt.Categorical(pdc) Categorical([a, a, Filtered, b, c]) Length: 5 FastArray([3, 3, 0, 2, 1], dtype=int8) Base Index: 1 FastArray([b'c', b'b', b'a'], dtype='|S1') Unique count: 3 Using the categories of another Categorical ------------------------------------------- >>> c = rt.Categorical(["a", "a", "b", "a", "c", "c", "b"], categories=["c", "b", "a"]) >>> c.category_array FastArray([b'c', b'b', b'a'], dtype='|S1') >>> c2 = rt.Categorical(["b", "c", "c", "b"], categories=c.category_array) >>> c2 Categorical([b, c, c, b]) Length: 4 FastArray([2, 1, 1, 2], dtype=int8) Base Index: 1 FastArray([b'c', b'b', b'a'], dtype='|S1') Unique count: 3 Note that the ``c2.category_array`` has the same values as ``c.category_array``, but it is a copy of and not a reference to the latter:: >>> c.category_array is c2.category_array False To create a `Categorical` that references the same categorical array, it must be constructed with indices and categories:: >>> c2 = rt.Categorical([1, 2, 1, 2], categories=c.category_array) >>> c.category_array is c2.category_array True From an array of values using ``rt.cut`` or ``rt.qcut`` ------------------------------------------------------- Both `cut` and `qcut` partition values into discrete bins that form the categories of a Categorical. 
With `cut`, values can be partitioned into a specified number of equal-width bins or bins bounded by specified endpoints. Here, they're partitioned into 3 equal-width bins:: >>> rt.cut(x=rt.FA([1, 7, 5, 4, 6, 3]), bins=3) Categorical([1.0->3.0, 5.0->7.0, 3.0->5.0, 3.0->5.0, 5.0->7.0, 1.0->3.0]) Length: 6 FastArray([1, 3, 2, 2, 3, 1], dtype=int8) Base Index: 1 FastArray([b'1.0->3.0', b'3.0->5.0', b'5.0->7.0'], dtype='|S8') Unique count: 3 Here the bins are bounded by specified endpoints. Values that fall outside of the bins are put in the Filtered category:: >>> rt.cut(x=rt.FA([1, 7, 5, 4, 6, 3]), bins=[1, 3, 6]) Categorical([1.0->3.0, Filtered, 3.0->6.0, 3.0->6.0, 3.0->6.0, 1.0->3.0]) Length: 6 FastArray([1, 0, 2, 2, 2, 1], dtype=int8) Base Index: 1 FastArray([b'1.0->3.0', b'3.0->6.0'], dtype='|S8') Unique count: 2 The `qcut` function lets you partition values into bins based on sample quantiles:: >>> rt.qcut(rt.arange(5), q=4) Categorical([0.0->1.0, 0.0->1.0, 1.0->2.0, 2.0->3.0, 3.0->4.0]) Length: 5 FastArray([2, 2, 3, 4, 5], dtype=int8) Base Index: 1 FastArray([b'Clipped', b'0.0->1.0', b'1.0->2.0', b'2.0->3.0', b'3.0->4.0'], dtype='|S8') Unique count: 5 The 'Clipped' bin is created to hold any out-of-bounds values, such as when a value falls outside of a specified range. A 'Clipped' bin is different from a 'Filtered' bin:: >>> rt.qcut(rt.arange(5), q=[.1, .25, .5, .75, 1.], filter=[True, False, True, True, True]) Categorical([Clipped, Filtered, 1.5->2.5, 2.5->3.25, 3.25->4.0]) Length: 5 FastArray([1, 0, 3, 4, 5], dtype=int8) Base Index: 1 FastArray([b'Clipped', b'0.6->1.5', b'1.5->2.5', b'2.5->3.25', b'3.25->4.0'], dtype='|S9') Unique count: 5
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/categoricals_user_guide_construct.rst
0.883839
0.792544
categoricals_user_guide_construct.rst
pypi
Perform Group Operations with Categoricals ========================================== Riptable Categoricals have two related uses: - They efficiently store string (or other large dtype) arrays that have repeated values. The repeated values are partitioned into groups (a.k.a. categories), and each group is mapped to an integer. For example, in a Categorical that contains three ‘AAPL’ symbols and four ‘MSFT’ symbols, the data is partitioned into an ‘AAPL’ group that's mapped to 1 and a ‘MSFT’ group that's mapped to 2. This integer mapping allows the data to be stored and operated on more efficiently. - They’re Riptable’s class for doing group operations. A method applied to a Categorical is applied to each group separately. We’ll talk about group operations first, then look at how Categoricals store data under the hood. Here's a simple Dataset with repeated stock symbols and some values:: >>> ds = rt.Dataset() >>> ds.Symbol = rt.FA(['AAPL', 'MSFT', 'AAPL', 'TSLA', 'MSFT', 'TSLA']) >>> ds.Value = rt.FA([5, 10, 15, 20, 25, 30]) >>> ds # Symbol Value - ------ ----- 0 AAPL 5 1 MSFT 10 2 AAPL 15 3 TSLA 20 4 MSFT 25 5 TSLA 30 Categoricals for Group Operations --------------------------------- We know how to get the sum of the Value column:: >>> ds.Value.sum() 105 Categoricals make it just as easy to get the sum for each symbol. Use the Categorical constructor to turn the Symbol column into a Categorical:: >>> ds.Symbol = rt.Categorical(ds.Symbol) # Note: rt.Cat() also works. Now we call the ``sum()`` method on the Categorical, passing it the data we want to sum for each group:: >>> ds.Symbol.sum(ds.Value) *Symbol Value ------- ----- AAPL 20 MSFT 35 TSLA 50 A Dataset is returned containing the groups from the Categorical and the result of the operation we called on each group. Note the prepended '*' in the Symbol column. This indicates that the column was used as the grouping variable in an operation. 
Categoricals as Split, Apply, Combine Operations ------------------------------------------------ Hadley Wickham, known for his work on Rstats, described the operation (also known as a “group by” operation) as *split, apply, combine*. The illustration below shows how the groups are split based on the “keys” (or, in Riptable’s case, the Categorical values). The sum method is then applied to each group separately, and the results are combined into an output array. .. figure:: split-apply-combine-gray.svg :alt: The split-apply-combine operation Operations Supported by Categoricals ------------------------------------ Categoricals support most common reducing functions, which return one value per group. Some of the more common ones: ======================== ============================ **Reducing Function** **Description** ======================== ============================ ``count()`` Total number of items ``first()``, ``last()`` First item, last item ``mean()``, ``median()`` Mean, median ``min()``, ``max()`` Minimum, maximum ``std()``, ``var()`` Standard deviation, variance ``prod()`` Product of all items ``sum()`` Sum of all items ======================== ============================ Here’s the `complete list of Categorical reducing functions <tutorial_cat_reduce.rst>`__. Categoricals also support non-reducing functions. But because non-reducing functions return one value for each value in the original data, the results are a little different. For example, take ``cumsum()``, which is a running total. When it’s applied to a Categorical, the function does get applied to each group separately. 
However, the returned Dataset has one result per value of the original data:: >>> ds.Value2 = rt.FA([2, 10, 5, 25, 8, 20]) >>> ds.Symbol.cumsum(ds.Value2) # Value2 - ------ 0 2 1 10 2 7 3 25 4 18 5 45 The alignment of the result to the original data is easier to see if you add the results to the Dataset:: >>> ds.CumValue2 = ds.Symbol.cumsum(ds.Value2) >>> # Sort to make the cumulative sum per group more clear, then display only the relevant columns. >>> ds.sort_copy('Symbol').col_filter(['Symbol', 'Value2', 'CumValue2']) # Symbol Value2 CumValue2 - ------ ------ --------- 0 AAPL 2 2 1 AAPL 5 7 2 MSFT 10 10 3 MSFT 8 18 4 TSLA 25 25 5 TSLA 20 45 A commonly used non-reducing function is ``shift()``. You can use it to compare values with shifted versions of themselves – for example, today’s price compared to yesterday’s price, the volume compared to the volume an hour ago, etc. Where a category has no previous value to shift forward, the missing value is filled with an invalid value (e.g., ``Inv`` for integers or ``nan`` for floats):: >>> ds.PrevValue = ds.Symbol.shift(ds.Value) >>> ds.col_filter(['Symbol', 'Value', 'PrevValue']) # Symbol Value PrevValue - ------ ----- --------- 0 AAPL 5 Inv 1 MSFT 10 Inv 2 AAPL 15 5 3 TSLA 20 Inv 4 MSFT 25 10 5 TSLA 30 20 Other non-reducing functions include ``rolling_sum()``, ``rolling_mean()`` and their nan-versions ``rolling_nansum()`` and ``rolling_nanmean()``, and ``cumsum()`` and ``cumprod()``. Other functions not listed here can also be applied to Categoricals, including lambda functions and other user-defined functions, with the help of ``apply()``. More on that below. Expand the Results of Reducing Operations with ``transform`` ------------------------------------------------------------ Notice that if we try to add the result of a *reducing* operation to a Dataset, Riptable complains that the result isn’t the right length:: >>> try: ... ds.Mean = ds.Symbol.mean(ds.Value) ... except TypeError as e: ... 
print("TypeError:", e) TypeError: ('Row mismatch in Dataset._check_addtype. Tried to add Dataset of different lengths', 6, 3) You can expand the result of a reducing function so that it’s aligned with the original data by passing ``transform=True`` to the function:: >>> ds.MaxValue = ds.Symbol.max(ds.Value, transform=True) >>> ds.sort_copy(['Symbol', 'Value']).col_filter(['Symbol', 'Value', 'MaxValue']) # Symbol Value MaxValue - ------ ----- -------- 0 AAPL 5 15 1 AAPL 15 15 2 MSFT 10 25 3 MSFT 25 25 4 TSLA 20 30 5 TSLA 30 30 The max value per symbol is repeated for every instance of the symbol. Apply an Operation to Multiple Columns or a Dataset --------------------------------------------------- You can apply a function to multiple columns by passing a list of column names. Here's a reducing function applied to two columns:: >>> ds.Value3 = ds.Value * 2 # Add another column of data. >>> ds.Symbol.max([ds.Value, ds.Value3]) *Symbol Value Value3 ------- ----- ------ AAPL 15 30 MSFT 25 50 TSLA 30 60 Note the syntax for adding the results of an operation on two columns to a Dataset. To be the right length for the Dataset, the results have to be from a non-reducing function or a reducing function that has ``transform=True``:: >>> ds[['MaxValue', 'MaxValue3']] = ds.Symbol.max([ds.Value, ds.Value3], ... transform=True)[['Value', 'Value3']] # Symbol Value Value3 MaxValue MaxValue3 - ------ ----- ------ -------- --------- 0 AAPL 5 10 15 30 1 AAPL 15 30 15 30 2 MSFT 10 20 25 50 3 MSFT 25 50 25 50 4 TSLA 20 40 30 60 5 TSLA 30 60 30 60 You can also apply a function to a whole Dataset. Any column for which the function fails – for example, a numerical function on a string column – is not returned:: >>> ds.OptionType = list("PC")*3 # Add a string column. 
>>> ds.Symbol.max(ds) *Symbol Value CumValue Value3 MaxValue MaxValue3 ------- ----- -------- ------ -------- --------- AAPL 15 20 30 15 30 MSFT 25 35 50 25 50 TSLA 30 50 60 30 60 Categoricals for Storing Strings -------------------------------- To get a better sense of how Categoricals store data, let’s look at one under the hood:: >>> ds.Symbol Categorical([AAPL, MSFT, AAPL, TSLA, MSFT, TSLA]) Length: 6 FastArray([1, 2, 1, 3, 2, 3], dtype=int8) Base Index: 1 FastArray([b'AAPL', b'MSFT', b'TSLA'], dtype='|S4') Unique count: 3 The first line shows the 6 symbols. You can access the array with ``expand_array``:: >>> ds.Symbol.expand_array FastArray([b'AAPL', b'MSFT', b'AAPL', b'TSLA', b'MSFT', b'TSLA'], dtype='|S8') The second line is a FastArray of integers, with one integer for each unique category of the Categorical. It's accessible with ``_fa``:: >>> ds.Symbol._fa FastArray([1, 2, 1, 3, 2, 3], dtype=int8) The list of unique categories is shown in the third line. You can access the list with ``category_array``:: >>> ds.Symbol.category_array FastArray([b'AAPL', b'MSFT', b'TSLA'], dtype='|S4') It’s the same thing we get if we do:: >>> ds.Symbol.unique() FastArray([b'AAPL', b'MSFT', b'TSLA'], dtype='|S4') We can get a better picture of the mapping by putting the integer FastArray into the Dataset:: >>> ds.Mapping = ds.Symbol._fa >>> ds.col_filter(['Symbol', 'Mapping']) # Symbol Mapping - ------ ------- 0 AAPL 1 1 MSFT 2 2 AAPL 1 3 TSLA 3 4 MSFT 2 5 TSLA 3 Because it’s much more efficient to pass around integers than it is to pass around strings, it’s common for string data with repeated values to be stored using integer mapping. 
If you have data stored as integers (for example, datetime data), you can create a Categorical using the integer array and an array of unique categories:: >>> c = rt.Categorical([1, 3, 2, 2, 1, 3, 3, 1], categories=['a','b','c']) >>> c Categorical([a, c, b, b, a, c, c, a]) Length: 8 FastArray([1, 3, 2, 2, 1, 3, 3, 1]) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Notice that in this Categorical and the one we created above, the base index is 1, not 0. This brings us to an important note about Categoricals: By default, the base index is 1; 0 is reserved for holding any values of the Categorical that are filtered out of operations on the Categorical. Values can be filtered out of all operations or specific ones. Filter Values or Categories from All Categorical Operations ----------------------------------------------------------- When you create a Categorical, you can filter certain values or entire categories from all operations on it. We'll start with filtering values. Say we have a Dataset with symbols 'A' and 'B' that are associated with exchanges 'X', 'Y', and 'Z'. >>> # Create the Dataset. >>> rng = np.random.default_rng(seed=42) >>> N = 25 >>> symbol_exchange = rt.Dataset() >>> symbol_exchange.Symbol = rt.FA(rng.choice(['A', 'B'], N)) >>> symbol_exchange.Exchange = rt.FA(rng.choice(['X', 'Y', 'Z'], N)) >>> symbol_exchange # Symbol Exchange -- ------ -------- 0 B Y 1 A X 2 B Z 3 A Y 4 B Y 5 B Y 6 B Y 7 B X 8 A Y 9 B Z 10 A X 11 B Y 12 B X 13 A Y 14 B Y 15 B Z 16 A X 17 A X 18 A Y 19 A Z 20 A Z 21 B X 22 B X 23 A Z 24 B X We want to make the Symbol column a Categorical, but we're interested in only the symbol values that are associated with the 'X' exchange. When we create the Categorical, we can use the ``filter`` keyword argument with a Boolean mask array that's True for symbol values associated with the 'X' exchange:: >>> exchangeX = symbol_exchange.Exchange == 'X' # Create a mask array. 
>>> c_x = rt.Cat(symbol_exchange.Symbol, filter=exchangeX) When we view the Categorical, we can see that symbol values associated with exchanges 'Y' and 'Z' are shown as 'Filtered', and the 'Filtered' values are mapped to the 0 index in the integer array:: >>> c_x Categorical([Filtered, A, Filtered, Filtered, Filtered, ..., Filtered, B, B, Filtered, B]) Length: 25 FastArray([0, 1, 0, 0, 0, ..., 0, 2, 2, 0, 2], dtype=int8) Base Index: 1 FastArray([b'A', b'B'], dtype='|S1') Unique count: 2 To better see what's filtered, we can add it to the Dataset:: >>> symbol_exchange.Filtered = c_x # Symbol Exchange Filtered -- ------ -------- -------- 0 B Y Filtered 1 A X A 2 B Z Filtered 3 A Y Filtered 4 B Y Filtered 5 B Y Filtered 6 B Y Filtered 7 B X B 8 A Y Filtered 9 B Z Filtered 10 A X A 11 B Y Filtered 12 B X B 13 A Y Filtered 14 B Y Filtered 15 B Z Filtered 16 A X A 17 A X A 18 A Y Filtered 19 A Z Filtered 20 A Z Filtered 21 B X B 22 B X B 23 A Z Filtered 24 B X B Now, a group operation applied to the Categorical omits the filtered values:: >>> c_x.count() *Symbol Count ------- ----- A 4 B 5 Filtering out an entire category (here, the 'A' symbol) is similar:: >>> f_A = symbol_exchange.Symbol != 'A' >>> c_b = rt.Categorical(symbol_exchange.Symbol, filter=f_A) >>> c_b Categorical([B, Filtered, B, Filtered, B, ..., Filtered, B, B, Filtered, B]) Length: 25 FastArray([1, 0, 1, 0, 1, ..., 0, 1, 1, 0, 1], dtype=int8) Base Index: 1 FastArray([b'B'], dtype='|S1') Unique count: 1 The filtered category is entirely omitted from operations:: >>> c_b.count() *Symbol Count ------- ----- B 14 If you're creating a Categorical from integers and provided categories, another way to filter a category is to map it to 0. Because 0 is reserved for the Filtered category, here 'a' is mapped to 1 and 'b' is mapped to 2. 
And because there's no 3 to map to 'c', 'c' becomes Filtered:: >>> c1 = rt.Categorical([0, 2, 1, 1, 0, 2, 2, 0], categories=['a','b','c']) >>> c1 Categorical([Filtered, b, a, a, Filtered, b, b, Filtered]) Length: 8 FastArray([0, 2, 1, 1, 0, 2, 2, 0]) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 In this case, the filtered category appears in the result, but it's still omitted from calculations on the Categorical:: >>> c1.count() *key_0 Count ------ ----- a 2 b 3 c 0 Note that the first column in the output is labeled 'key_0'. This was code-generated because there was no explicit column name declaration. You can use the :meth:`.FastArray.set_name` method to assign a column name to the Categorical before doing any grouping operations. The Count column was created by the ``count()`` method. Filter Values or Categories from Certain Categorical Operations --------------------------------------------------------------- It’s also possible to filter values for only a certain operation. In `Get and Operate on Subsets of Data Using Filters <tutorial_filters.rst>`__, we saw that many operations called on FastArrays / Dataset columns take a ``filter`` keyword argument that limits the data operated on:: >>> a = rt.FA([1, 2, 3, 4, 5]) >>> a.mean(filter=a > 2) 4.0 It's similar with Categoricals:: >>> Symbol = rt.Cat(rt.FA(['AAPL', 'MSFT', 'AAPL', 'TSLA', 'MSFT', 'TSLA'])) >>> Value = rt.FA([5, 10, 15, 20, 25, 30]) >>> Symbol.mean(Value, filter=Value > 20.0) *key_0 col_0 ------ ----- AAPL nan MSFT 25.00 TSLA 30.00 The data that doesn’t meet the condition is omitted from the computation for only that operation. To filter out an entire category:: >>> ds.Symbol.mean(ds.Value, filter=ds.Symbol != 'MSFT') *Symbol Value ------- ----- AAPL 10.00 MSFT nan TSLA 25.00 In this case, the filtered category is shown, but the result of the operation on its values is NaN. 
If you want to make sure your filter is doing what you intend before you apply a function to the filtered data, you can call ``set_valid()`` on the Categorical. Calling ``set_valid()`` on a Categorical returns a Categorical of the same length in which everywhere the filter result is False, the category gets set to ‘Filtered’ and the associated index value is 0. This is in contrast to filtered Datasets, where ``filter()`` returns a smaller Dataset, reduced to only the rows where the filter result is True (where the filter condition is met). >>> Symbol.set_valid(ds.Value > 20.0) Categorical([Filtered, Filtered, Filtered, Filtered, MSFT, TSLA]) Length: 6 FastArray([0, 0, 0, 0, 1, 2], dtype=int8) Base Index: 1 FastArray([b'MSFT', b'TSLA'], dtype='|S4') Unique count: 2 To more closely spot-check, put the filtered values in a Dataset:: >>> ds_test = rt.Dataset() >>> ds_test.SymbolTest = ds.Symbol.set_valid(ds.Value > 20.0) >>> ds_test.ValueTest = ds.Value >>> ds_test # SymbolTest ValueTest - ---------- --------- 0 Filtered 5 1 Filtered 10 2 Filtered 15 3 Filtered 20 4 MSFT 25 5 TSLA 30 The advice to avoid making unnecessary copies of large amounts of data using ``set_valid()`` also applies to Categoricals. Multi-Key Categoricals ---------------------- Multi-key Categoricals let you create and operate on groupings based on two related categories. 
An example is a symbol-month pair, which you could use to get the average value of a stock for each month in your data:: >>> ds_mk = rt.Dataset() >>> N = 25 >>> ds_mk.Symbol = rt.FA(rng.choice(['AAPL', 'AMZN', 'MSFT'], N)) >>> ds_mk.Value = rt.FA(rng.random(N)) >>> ds_mk.Date = rt.Date.range('20210101', '20211231', step=15) >>> ds_mk # Symbol Value Date -- ------ ----- ---------- 0 AAPL 0.59 2021-01-01 1 MSFT 0.78 2021-01-16 2 AAPL 0.80 2021-01-31 3 AAPL 0.95 2021-02-15 4 AMZN 0.25 2021-03-02 5 MSFT 0.59 2021-03-17 6 AMZN 0.10 2021-04-01 7 MSFT 0.62 2021-04-16 8 MSFT 0.17 2021-05-01 9 AAPL 0.56 2021-05-16 10 MSFT 0.57 2021-05-31 11 AMZN 0.47 2021-06-15 12 AMZN 0.52 2021-06-30 13 AAPL 0.76 2021-07-15 14 AMZN 0.80 2021-07-30 15 MSFT 0.49 2021-08-14 16 AMZN 0.60 2021-08-29 17 AAPL 0.93 2021-09-13 18 AMZN 0.12 2021-09-28 19 MSFT 0.12 2021-10-13 20 MSFT 0.09 2021-10-28 21 AAPL 0.66 2021-11-12 22 MSFT 0.42 2021-11-27 23 MSFT 0.77 2021-12-12 24 AAPL 0.67 2021-12-27 We want to group the dates by month. 
An easy way to do this is by using ``start_of_month``:: >>> ds_mk.Month = ds_mk.Date.start_of_month >>> ds_mk # Symbol Value Date Month -- ------ ----- ---------- ---------- 0 AAPL 0.59 2021-01-01 2021-01-01 1 MSFT 0.78 2021-01-16 2021-01-01 2 AAPL 0.80 2021-01-31 2021-01-01 3 AAPL 0.95 2021-02-15 2021-02-01 4 AMZN 0.25 2021-03-02 2021-03-01 5 MSFT 0.59 2021-03-17 2021-03-01 6 AMZN 0.10 2021-04-01 2021-04-01 7 MSFT 0.62 2021-04-16 2021-04-01 8 MSFT 0.17 2021-05-01 2021-05-01 9 AAPL 0.56 2021-05-16 2021-05-01 10 MSFT 0.57 2021-05-31 2021-05-01 11 AMZN 0.47 2021-06-15 2021-06-01 12 AMZN 0.52 2021-06-30 2021-06-01 13 AAPL 0.76 2021-07-15 2021-07-01 14 AMZN 0.80 2021-07-30 2021-07-01 15 MSFT 0.49 2021-08-14 2021-08-01 16 AMZN 0.60 2021-08-29 2021-08-01 17 AAPL 0.93 2021-09-13 2021-09-01 18 AMZN 0.12 2021-09-28 2021-09-01 19 MSFT 0.12 2021-10-13 2021-10-01 20 MSFT 0.09 2021-10-28 2021-10-01 21 AAPL 0.66 2021-11-12 2021-11-01 22 MSFT 0.42 2021-11-27 2021-11-01 23 MSFT 0.77 2021-12-12 2021-12-01 24 AAPL 0.67 2021-12-27 2021-12-01 Now all Dates in January are associated to 2021-01-01, all Dates in February are associated to 2021-02-01, etc. These firsts of the month are our month groups. 
We create a multi-key Categorical by passing ``rt.Cat()`` the Symbol and Month columns:: >>> ds_mk.Symbol_Month = rt.Cat([ds_mk.Symbol, ds_mk.Month]) >>> ds_mk.Symbol_Month Categorical([(AAPL, 2021-01-01), (MSFT, 2021-01-01), (AAPL, 2021-01-01), (AAPL, 2021-02-01), (AMZN, 2021-03-01), ..., (MSFT, 2021-10-01), (AAPL, 2021-11-01), (MSFT, 2021-11-01), (MSFT, 2021-12-01), (AAPL, 2021-12-01)]) Length: 25 FastArray([ 1, 2, 1, 3, 4, ..., 17, 18, 19, 20, 21], dtype=int8) Base Index: 1 {'Symbol': FastArray([b'AAPL', b'MSFT', b'AAPL', b'AMZN', b'MSFT', ..., b'MSFT', b'AAPL', b'MSFT', b'MSFT', b'AAPL'], dtype='|S4'), 'Month': Date(['2021-01-01', '2021-01-01', '2021-02-01', '2021-03-01', '2021-03-01', ..., '2021-10-01', '2021-11-01', '2021-11-01', '2021-12-01', '2021-12-01'])} Unique count: 21 And now we can get the average value for each symbol-month pair:: >>> ds_mk.Symbol_Month.mean(ds_mk.Value) *Symbol *Month Value ------- ---------- ----- AAPL 2021-01-01 0.69 MSFT 2021-01-01 0.78 AAPL 2021-02-01 0.95 AMZN 2021-03-01 0.25 MSFT 2021-03-01 0.59 AMZN 2021-04-01 0.10 MSFT 2021-04-01 0.62 . 2021-05-01 0.37 AAPL 2021-05-01 0.56 AMZN 2021-06-01 0.49 AAPL 2021-07-01 0.76 AMZN 2021-07-01 0.80 MSFT 2021-08-01 0.49 AMZN 2021-08-01 0.60 AAPL 2021-09-01 0.93 AMZN 2021-09-01 0.12 MSFT 2021-10-01 0.10 AAPL 2021-11-01 0.66 MSFT 2021-11-01 0.42 . 2021-12-01 0.77 AAPL 2021-12-01 0.67 The aggregated results are presented with the two group keys arranged hierarchically. The dot indicates that the category above is repeated. All the functions supported by Categoricals can also be used for multi-key Categoricals. You can also filter multi-key Categoricals by calling ``set_valid()`` on the Categorical, and operate on filtered data by passing the filter keyword argument to the function you use. Later on we’ll cover another Riptable function, ``Accum2()``, that aggregates two groups similarly but provides summary data and a styled output. 
Partition Numeric Data into Bins for Analysis --------------------------------------------- When you have a large array of numeric data, ``rt.cut()`` and ``rt.qcut()`` can help you partition the values into Categorical bins for analysis. Use ``cut()`` to create equal-width bins or bins defined by specified endpoints. Use ``qcut()`` to create bins based on sample quantiles. Let’s use a moderately large Dataset:: >>> N = 1_000 >>> ds2 = rt.Dataset() >>> ds2.Symbol = rt.FA(rng.choice(['AAPL', 'AMZN', 'MSFT'], N)) >>> base_price = 100 + rt.FA(np.linspace(0, 900, N)) >>> noise = rt.FA(rng.normal(0, 50, N)) >>> ds2.Price = base_price + noise >>> ds2 # Symbol Price --- ------ -------- 0 AMZN 93.87 1 AMZN 150.69 2 AAPL 154.76 3 MSFT 153.99 4 AMZN 105.55 5 AMZN 62.25 6 MSFT 51.22 7 AMZN 123.54 8 AAPL 126.17 9 AAPL 172.47 10 AAPL 164.01 11 MSFT 103.30 12 AAPL 48.60 13 AAPL 95.76 14 AMZN 123.47 ... ... ... 985 AMZN 1,027.85 986 AAPL 993.06 987 AMZN 867.37 988 AAPL 940.92 989 AAPL 1,025.38 990 MSFT 1,052.54 991 AAPL 1,048.25 992 AMZN 914.09 993 AMZN 1,009.67 994 AAPL 1,046.27 995 AAPL 913.48 996 AMZN 996.90 997 AMZN 1,011.89 998 MSFT 984.06 999 MSFT 907.39 Create equal-width bins with ``rt.cut()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To partition values into equal-width bins, use ``cut()`` and specify the number of bins:: >>> ds2.PriceBin = rt.cut(ds2.Price, bins=5) >>> ds2 # Symbol Price PriceBin --- ------ -------- ----------------- 0 AMZN 93.87 -3.011->221.182 1 AMZN 150.69 -3.011->221.182 2 AAPL 154.76 -3.011->221.182 3 MSFT 153.99 -3.011->221.182 4 AMZN 105.55 -3.011->221.182 5 AMZN 62.25 -3.011->221.182 6 MSFT 51.22 -3.011->221.182 7 AMZN 123.54 -3.011->221.182 8 AAPL 126.17 -3.011->221.182 9 AAPL 172.47 -3.011->221.182 10 AAPL 164.01 -3.011->221.182 11 MSFT 103.30 -3.011->221.182 12 AAPL 48.60 -3.011->221.182 13 AAPL 95.76 -3.011->221.182 14 AMZN 123.47 -3.011->221.182 ... ... ... ... 
985 AMZN 1,027.85 893.763->1117.956 986 AAPL 993.06 893.763->1117.956 987 AMZN 867.37 669.569->893.763 988 AAPL 940.92 893.763->1117.956 989 AAPL 1,025.38 893.763->1117.956 990 MSFT 1,052.54 893.763->1117.956 991 AAPL 1,048.25 893.763->1117.956 992 AMZN 914.09 893.763->1117.956 993 AMZN 1,009.67 893.763->1117.956 994 AAPL 1,046.27 893.763->1117.956 995 AAPL 913.48 893.763->1117.956 996 AMZN 996.90 893.763->1117.956 997 AMZN 1,011.89 893.763->1117.956 998 MSFT 984.06 893.763->1117.956 999 MSFT 907.39 893.763->1117.956 Notice that the bins form the categories of a Categorical:: >>> ds2.PriceBin Categorical([-3.011->221.182, -3.011->221.182, -3.011->221.182, -3.011->221.182, -3.011->221.182, ..., 893.763->1117.956, 893.763->1117.956, 893.763->1117.956, 893.763->1117.956, 893.763->1117.956]) Length: 1000 FastArray([1, 1, 1, 1, 1, ..., 5, 5, 5, 5, 5], dtype=int8) Base Index: 1 FastArray([b'-3.011->221.182', b'221.182->445.376', b'445.376->669.569', b'669.569->893.763', b'893.763->1117.956'], dtype='|S17') Unique count: 5 To specify your own bin endpoints, provide an array. 
Here, we define two bins: one for prices from 0 to 600 (with both endpoints, 0 and 600, included), and one for prices from 600 to 1,200 (600 excluded, 1,200 included):: >>> bins = [0, 600, 1200] >>> ds2.PriceBin2 = rt.cut(ds2.Price, bins) >>> ds2 # Symbol Price PriceBin PriceBin2 --- ------ -------- ----------------- ------------- 0 AMZN 93.87 -3.011->221.182 0.0->600.0 1 AMZN 150.69 -3.011->221.182 0.0->600.0 2 AAPL 154.76 -3.011->221.182 0.0->600.0 3 MSFT 153.99 -3.011->221.182 0.0->600.0 4 AMZN 105.55 -3.011->221.182 0.0->600.0 5 AMZN 62.25 -3.011->221.182 0.0->600.0 6 MSFT 51.22 -3.011->221.182 0.0->600.0 7 AMZN 123.54 -3.011->221.182 0.0->600.0 8 AAPL 126.17 -3.011->221.182 0.0->600.0 9 AAPL 172.47 -3.011->221.182 0.0->600.0 10 AAPL 164.01 -3.011->221.182 0.0->600.0 11 MSFT 103.30 -3.011->221.182 0.0->600.0 12 AAPL 48.60 -3.011->221.182 0.0->600.0 13 AAPL 95.76 -3.011->221.182 0.0->600.0 14 AMZN 123.47 -3.011->221.182 0.0->600.0 ... ... ... ... ... 985 AMZN 1,027.85 893.763->1117.956 600.0->1200.0 986 AAPL 993.06 893.763->1117.956 600.0->1200.0 987 AMZN 867.37 669.569->893.763 600.0->1200.0 988 AAPL 940.92 893.763->1117.956 600.0->1200.0 989 AAPL 1,025.38 893.763->1117.956 600.0->1200.0 990 MSFT 1,052.54 893.763->1117.956 600.0->1200.0 991 AAPL 1,048.25 893.763->1117.956 600.0->1200.0 992 AMZN 914.09 893.763->1117.956 600.0->1200.0 993 AMZN 1,009.67 893.763->1117.956 600.0->1200.0 994 AAPL 1,046.27 893.763->1117.956 600.0->1200.0 995 AAPL 913.48 893.763->1117.956 600.0->1200.0 996 AMZN 996.90 893.763->1117.956 600.0->1200.0 997 AMZN 1,011.89 893.763->1117.956 600.0->1200.0 998 MSFT 984.06 893.763->1117.956 600.0->1200.0 999 MSFT 907.39 893.763->1117.956 600.0->1200.0 Create bins based on sample quantiles with ``rt.qcut()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To partition values into bins based on sample quantiles, use ``qcut()``. 
We’ll create another Dataset with symbol groups and contracts per day:: >>> N = 1_000 >>> ds3 = rt.Dataset() >>> ds3.SymbolGroup = rt.FA(rng.choice(['spx', 'eqt_comp', 'eqt300', 'eqtrest'], N)) >>> ds3.ContractsPerDay = rng.integers(low=0, high=5_000, size=N) >>> ds3.head() # SymbolGroup ContractsPerDay -- ----------- --------------- 0 eqt300 1,624 1 spx 851 2 spx 3,487 3 eqt300 345 4 eqtrest 2,584 5 spx 3,639 6 spx 4,741 7 eqtrest 1,440 8 eqtrest 39 9 spx 3,618 10 eqt_comp 7 11 eqt300 331 12 spx 4,952 13 eqt_comp 4,312 14 eqt_comp 3,537 15 eqt300 4,177 16 eqt_comp 376 17 eqt_comp 444 18 eqt_comp 1,504 19 eqtrest 118 Create three labeled, quantile-based bins for the volume:: >>> label_names = ['Low', 'Medium', 'High'] >>> ds3.Volume = rt.qcut(ds3.ContractsPerDay, q=3, labels=label_names) >>> ds3.head() # SymbolGroup ContractsPerDay Volume -- ----------- --------------- ------ 0 eqt300 1,624 Low 1 spx 851 Low 2 spx 3,487 High 3 eqt300 345 Low 4 eqtrest 2,584 Medium 5 spx 3,639 High 6 spx 4,741 High 7 eqtrest 1,440 Low 8 eqtrest 39 Low 9 spx 3,618 High 10 eqt_comp 7 Low 11 eqt300 331 Low 12 spx 4,952 High 13 eqt_comp 4,312 High 14 eqt_comp 3,537 High 15 eqt300 4,177 High 16 eqt_comp 376 Low 17 eqt_comp 444 Low 18 eqt_comp 1,504 Low 19 eqtrest 118 Low As with ``cut()``, the bins form the categories of a Categorical:: >>> ds3.Volume Categorical([High, High, Medium, High, Low, ..., Low, Medium, High, Low, Low]) Length: 1000 FastArray([4, 4, 3, 4, 2, ..., 2, 3, 4, 2, 2], dtype=int8) Base Index: 1 FastArray([b'Clipped', b'Low', b'Medium', b'High'], dtype='|S7') Unique count: 4 The 'Clipped' bin is created to hold any out-of-bounds values (though there are none in this case). Alternatively, you can give ``qcut()`` a list of quantiles (numbers between 0 and 1, inclusive). Here, we create quartiles:: >>> quartiles = [0, .25, .5, .75, 1.] 
>>> ds3.VolQuartiles = rt.qcut(ds3.ContractsPerDay, q=quartiles) >>> ds3.head() # SymbolGroup ContractsPerDay Volume VolQuartiles -- ----------- --------------- ------ --------------- 0 eqt300 1,624 Low 1273.75->2601.0 1 spx 851 Low 0.0->1273.75 2 spx 3,487 High 2601.0->3793.0 3 eqt300 345 Low 0.0->1273.75 4 eqtrest 2,584 Medium 1273.75->2601.0 5 spx 3,639 High 2601.0->3793.0 6 spx 4,741 High 3793.0->4991.0 7 eqtrest 1,440 Low 1273.75->2601.0 8 eqtrest 39 Low 0.0->1273.75 9 spx 3,618 High 2601.0->3793.0 10 eqt_comp 7 Low 0.0->1273.75 11 eqt300 331 Low 0.0->1273.75 12 spx 4,952 High 3793.0->4991.0 13 eqt_comp 4,312 High 3793.0->4991.0 14 eqt_comp 3,537 High 2601.0->3793.0 15 eqt300 4,177 High 3793.0->4991.0 16 eqt_comp 376 Low 0.0->1273.75 17 eqt_comp 444 Low 0.0->1273.75 18 eqt_comp 1,504 Low 1273.75->2601.0 19 eqtrest 118 Low 0.0->1273.75 Per-Group Calculations with Other Functions ------------------------------------------- Categoricals support most common functions. For functions that aren’t supported (for example, a function you’ve written), you can use ``apply_reduce()`` to apply a reducing function and ``apply_nonreduce()`` to apply a non-reducing function. ``apply_reduce()`` ^^^^^^^^^^^^^^^^^^ The function you use with ``apply_reduce()`` can take in one or multiple columns/FastArrays as input, but it must return a single value per group. To illustrate, we’ll use ``apply_reduce()`` with two simple lambda functions that each return one value. (A lambda function is an anonymous function that consists of a single statement and gives back a return value. When you have a function that takes a function as an argument, using a lambda function as the argument can sometimes be simpler and clearer than defining a function separately.) 
First, we’ll create a new Dataset:: >>> N = 50 >>> ds = rt.Dataset() >>> ds.Symbol = rt.Cat(rng.choice(['AAPL', 'AMZN', 'TSLA', 'SPY', 'GME'], N)) >>> ds.Value = rng.random(N) * 100 >>> ds.Value2 = ds.Value * 2 >>> ds.sample() # Symbol Value Value2 - ------ ----- ------ 0 SPY 41.04 82.09 1 TSLA 93.07 186.14 2 AMZN 2.03 4.05 3 AAPL 16.19 32.37 4 AMZN 2.42 4.85 5 TSLA 98.13 196.26 6 SPY 98.67 197.34 7 SPY 62.31 124.61 8 TSLA 96.79 193.58 9 TSLA 67.35 134.70 The first lambda function takes one column as input:: >>> # ds.Value becomes the 'x' in our lambda function. >>> ds.Symbol.apply_reduce(lambda x: x.min() + 2, ds.Value) *Symbol Value ------- ----- AAPL 11.36 AMZN 4.03 GME 16.65 SPY 7.76 TSLA 2.10 Our second lambda function takes two columns as input:: >>> ds.Symbol.apply_reduce(lambda x, y: x.sum() * y.mean(), (ds.Value, ds.Value2)) *Symbol Value ------- --------- AAPL 26,904.13 AMZN 39,400.64 GME 26,857.53 SPY 32,560.75 TSLA 74,124.69 Also note that in this example, the first column listed in the tuple is the column name shown in the output. If you like, you can use ``transform=True`` to expand the results and assign them to a column:: >>> ds.MyCalc1 = ds.Symbol.apply_reduce(lambda x: x.min() + 2, ds.Value, transform=True) >>> ds.MyCalc2 = ds.Symbol.apply_reduce(lambda x, y: x.sum() * y.mean(), ... (ds.Value, ds.Value2), transform=True) >>> ds # Symbol Value Value2 MyCalc1 MyCalc2 --- ------ ----- ------ ------- --------- 0 AAPL 12.39 24.77 11.36 26,904.13 1 SPY 41.04 82.09 7.76 32,560.75 2 AMZN 55.69 111.39 4.03 39,400.64 3 TSLA 93.07 186.14 2.10 74,124.69 4 TSLA 3.62 7.24 2.10 74,124.69 5 TSLA 62.15 124.29 2.10 74,124.69 6 SPY 45.77 91.55 7.76 32,560.75 7 AMZN 2.03 4.05 4.03 39,400.64 8 SPY 24.95 49.91 7.76 32,560.75 9 AMZN 11.85 23.70 4.03 39,400.64 10 AMZN 21.68 43.36 4.03 39,400.64 11 TSLA 27.46 54.91 2.10 74,124.69 12 GME 40.13 80.26 16.65 26,857.53 13 AMZN 52.90 105.81 4.03 39,400.64 14 TSLA 0.10 0.20 2.10 74,124.69 ... ... ... ... ... ... 
35 TSLA 38.40 76.79 2.10 74,124.69 36 AAPL 93.12 186.25 11.36 26,904.13 37 SPY 14.92 29.85 7.76 32,560.75 38 AAPL 99.71 199.41 11.36 26,904.13 39 TSLA 37.91 75.83 2.10 74,124.69 40 GME 64.88 129.75 16.65 26,857.53 41 TSLA 96.79 193.58 2.10 74,124.69 42 SPY 5.76 11.52 7.76 32,560.75 43 TSLA 92.29 184.57 2.10 74,124.69 44 AMZN 56.78 113.56 4.03 39,400.64 45 AMZN 70.44 140.88 4.03 39,400.64 46 TSLA 14.92 29.84 2.10 74,124.69 47 AAPL 53.34 106.68 11.36 26,904.13 48 TSLA 67.35 134.70 2.10 74,124.69 49 TSLA 45.62 91.25 2.10 74,124.69 As expected, every instance of a category gets the same value. ``apply_nonreduce()`` ^^^^^^^^^^^^^^^^^^^^^ For ``apply_nonreduce()``, our lambda function computes a new value for every element of the original input:: >>> ds.MyCalc3 = ds.Symbol.apply_nonreduce(lambda x: x.cumsum() + 2, ds.Value) >>> ds # Symbol Value Value2 MyCalc1 MyCalc2 MyCalc3 --- ------ ----- ------ ------- --------- ------- 0 AAPL 12.39 24.77 11.36 26,904.13 14.39 1 SPY 41.04 82.09 7.76 32,560.75 43.04 2 AMZN 55.69 111.39 4.03 39,400.64 57.69 3 TSLA 93.07 186.14 2.10 74,124.69 95.07 4 TSLA 3.62 7.24 2.10 74,124.69 98.69 5 TSLA 62.15 124.29 2.10 74,124.69 160.84 6 SPY 45.77 91.55 7.76 32,560.75 88.82 7 AMZN 2.03 4.05 4.03 39,400.64 59.72 8 SPY 24.95 49.91 7.76 32,560.75 113.77 9 AMZN 11.85 23.70 4.03 39,400.64 71.57 10 AMZN 21.68 43.36 4.03 39,400.64 93.25 11 TSLA 27.46 54.91 2.10 74,124.69 188.30 12 GME 40.13 80.26 16.65 26,857.53 42.13 13 AMZN 52.90 105.81 4.03 39,400.64 146.15 14 TSLA 0.10 0.20 2.10 74,124.69 188.40 ... ... ... ... ... ... ... 
35 TSLA 38.40 76.79 2.10 74,124.69 417.18 36 AAPL 93.12 186.25 11.36 26,904.13 133.05 37 SPY 14.92 29.85 7.76 32,560.75 399.73 38 AAPL 99.71 199.41 11.36 26,904.13 232.76 39 TSLA 37.91 75.83 2.10 74,124.69 455.09 40 GME 64.88 129.75 16.65 26,857.53 261.12 41 TSLA 96.79 193.58 2.10 74,124.69 551.88 42 SPY 5.76 11.52 7.76 32,560.75 405.49 43 TSLA 92.29 184.57 2.10 74,124.69 644.17 44 AMZN 56.78 113.56 4.03 39,400.64 437.63 45 AMZN 70.44 140.88 4.03 39,400.64 508.07 46 TSLA 14.92 29.84 2.10 74,124.69 659.09 47 AAPL 53.34 106.68 11.36 26,904.13 286.10 48 TSLA 67.35 134.70 2.10 74,124.69 726.44 49 TSLA 45.62 91.25 2.10 74,124.69 772.06 Like ``apply_reduce()``, ``apply_nonreduce()`` can take one or multiple columns as input:: >>> ds.MyCalc4 = ds.Symbol.apply_nonreduce(lambda x, y: x.cumsum() + y, (ds.Value, ds.Value2)) >>> ds # Symbol Value Value2 MyCalc1 MyCalc2 MyCalc3 MyCalc4 --- ------ ----- ------ ------- --------- ------- ------- 0 AAPL 12.39 24.77 11.36 26,904.13 14.39 37.16 1 SPY 41.04 82.09 7.76 32,560.75 43.04 123.13 2 AMZN 55.69 111.39 4.03 39,400.64 57.69 167.08 3 TSLA 93.07 186.14 2.10 74,124.69 95.07 279.21 4 TSLA 3.62 7.24 2.10 74,124.69 98.69 103.94 5 TSLA 62.15 124.29 2.10 74,124.69 160.84 283.13 6 SPY 45.77 91.55 7.76 32,560.75 88.82 178.36 7 AMZN 2.03 4.05 4.03 39,400.64 59.72 61.77 8 SPY 24.95 49.91 7.76 32,560.75 113.77 161.68 9 AMZN 11.85 23.70 4.03 39,400.64 71.57 93.27 10 AMZN 21.68 43.36 4.03 39,400.64 93.25 134.61 11 TSLA 27.46 54.91 2.10 74,124.69 188.30 241.21 12 GME 40.13 80.26 16.65 26,857.53 42.13 120.39 13 AMZN 52.90 105.81 4.03 39,400.64 146.15 249.96 14 TSLA 0.10 0.20 2.10 74,124.69 188.40 186.59 ... ... ... ... ... ... ... ... 
35 TSLA 38.40 76.79 2.10 74,124.69 417.18 491.97 36 AAPL 93.12 186.25 11.36 26,904.13 133.05 317.30 37 SPY 14.92 29.85 7.76 32,560.75 399.73 427.57 38 AAPL 99.71 199.41 11.36 26,904.13 232.76 430.17 39 TSLA 37.91 75.83 2.10 74,124.69 455.09 528.92 40 GME 64.88 129.75 16.65 26,857.53 261.12 388.88 41 TSLA 96.79 193.58 2.10 74,124.69 551.88 743.46 42 SPY 5.76 11.52 7.76 32,560.75 405.49 415.01 43 TSLA 92.29 184.57 2.10 74,124.69 644.17 826.74 44 AMZN 56.78 113.56 4.03 39,400.64 437.63 549.19 45 AMZN 70.44 140.88 4.03 39,400.64 508.07 646.95 46 TSLA 14.92 29.84 2.10 74,124.69 659.09 686.92 47 AAPL 53.34 106.68 11.36 26,904.13 286.10 390.78 48 TSLA 67.35 134.70 2.10 74,124.69 726.44 859.14 49 TSLA 45.62 91.25 2.10 74,124.69 772.06 861.31 ``apply()`` ^^^^^^^^^^^ If you want your custom function to return multiple aggregations – for example, you want to return both the mean value of one column and the minimum value of another column – use ``apply()``. Warning: Because ``apply()`` isn’t a vectorized operation, it can be slow and use a lot of memory if you’re using it on large amounts of data. Try to avoid it if you can. To be used with ``apply()``, your function must be able to take in a Dataset. It can return a Dataset, a single array, or a dictionary of column names and values. Here’s a function that performs two reducing operations and returns a Dataset:: >>> def my_apply_func(ds): ... new_ds = rt.Dataset({ ... 'Mean_Value': ds.Value.mean(), ... 'Min_Value': ds.Value.min() ... }) ... return new_ds Here it is applied:: >>> ds.Symbol.apply(my_apply_func, ds) *Symbol Mean_Value Min_Value ------- ---------- --------- AAPL 47.35 9.36 AMZN 38.93 2.03 GME 51.82 14.65 SPY 40.35 5.76 TSLA 48.13 0.10 Our second function performs two non-reducing operations:: >>> def my_apply_func2(ds): ... new_ds = rt.Dataset({ ... 'Val1': ds.Value * 3, ... 'Val2': ds.Value * 4 ... }) ... 
return new_ds >>> ds.Symbol.apply(my_apply_func2, ds) *gb_key_0 Val1 Val2 --------- ------ ------ AAPL 37.16 49.54 SPY 123.13 164.18 AMZN 167.08 222.77 TSLA 279.21 372.28 TSLA 10.87 14.49 TSLA 186.44 248.58 SPY 137.32 183.09 AMZN 6.08 8.10 SPY 74.86 99.82 AMZN 35.55 47.39 AMZN 65.04 86.72 TSLA 82.37 109.83 GME 120.39 160.52 AMZN 158.71 211.62 TSLA 0.30 0.40 ... ... ... TSLA 115.19 153.58 AAPL 279.37 372.50 SPY 44.77 59.69 AAPL 299.12 398.83 TSLA 113.74 151.65 GME 194.63 259.51 TSLA 290.37 387.16 SPY 17.28 23.04 TSLA 276.86 369.15 AMZN 170.34 227.12 AMZN 211.32 281.76 TSLA 44.76 59.67 AAPL 160.02 213.35 TSLA 202.05 269.41 TSLA 136.87 182.50 Because the operations in this function are non-reducing operations, the resulting Dataset is expanded. Note that until a reported bug is fixed, column names might not persist through grouping operations. For more in-depth information about Categoricals, see the `Categoricals User Guide <categoricals_user_guide>`. In the next section, `Accums <tutorial_accums.rst>`__, we look at another way to do multi-key groupings with fancier output. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_categoricals.rst
0.934005
0.863219
tutorial_categoricals.rst
pypi
Working with Missing Data ========================= When you work with real-world data, you often have to deal with missing values. It’s useful to know how Riptable stores and represents missing values, how to detect missing values in your data, and how you can use a few strategies to fill in missing values so you can continue to work with the data effectively. If you convert data between Riptable and other libraries, it’s also important to know how conversions of missing values are handled. In this section, we show how missing values are converted between Riptable and Pandas. Riptable Sentinel Values ------------------------ Riptable uses sentinel values for missing data. Missing floating-point numbers are ``NaN``\ s (Not a Number), per the IEEE Standard for Floating-Point Arithmetic. In Riptable, missing floating-point numbers are indicated by ``nan``. Missing integers are indicated by ``Inv``:: >>> ds = rt.Dataset({'Ints': [1, 2, 3], 'Floats': [0.1, 1.5, 2.7]}) >>> ds.Ints[0] = ds.Ints.inv >>> ds.Floats[0] = ds.Floats.inv >>> ds # Ints Floats - ---- ------ 0 Inv nan 1 2 1.50 2 3 2.70 Note the difference in how they’re stored. The floating-point ``NaN`` is stored as ``nan``:: >>> ds.Floats FastArray([nan, 1.5, 2.7]) The missing integer is a large negative number:: >>> ds.Ints FastArray([-2147483648, 2, 3]) In Riptable, missing interger values are stored as ``-MAXINT`` for ints and ``MAXINT`` for unsigned ints. This has the potential to cause problems, which we’ll look at below. Tip: To find out what the missing/invalid value is for an array, use ``inv`` property. 
The array doesn’t necessarily contain the invalid value; what’s returned is the invalid value for the array’s dtype:: >>> ds.Ints.inv -2147483648 Arithmetic with floating-point ``NaN`` values is well-established: any operation involving a ``NaN`` is another ``NaN``:: >>> ds.Floats.sum() >>> ds.FloatsPlus = ds.Floats * 2 >>> ds # Ints Floats FloatsPlus - ---- ------ ---------- 0 Inv nan nan 1 2 1.50 3.00 2 3 2.70 5.40 To help, many arithmetic functions have NaN versions that ignore ``NaN`` values:: >>> ds.Ints.nansum() 5 Be careful with missing integers, however! As of this writing, missing integer values are treated at face value in arithmetic operations:: >>> ds.Ints.sum() -2147483643 Fortunately, the ``NaN`` versions ignore the missing values:: >>> ds.Ints.nansum() 5 There are a few methods for detecting missing values in Riptable structures. For FastArrays, ``isnan()`` and ``notna()`` both return Boolean mask arrays. As you might expect, ``isnan()`` returns True where it finds a ``NaN`` value:: >>> ds.Ints.isnan() FastArray([ True, False, False]) And ``notna()`` returns True where it finds a non-``NaN`` value:: >>> ds.Floats.notna() FastArray([False, True, True]) A more general approach is to use ``isfinite()``. It returns a Boolean array where False indicates either a ``NaN`` or a value of positive or negative infinity:: >>> ds.Floats[1] = np.inf >>> ds.Floats.isfinite() FastArray([False, False, True]) And as you might imagine, ``isnotfinite()`` does the opposite:: >>> ds.Floats.isnotfinite() FastArray([ True, True, False]) Note that ``inf`` is not considered a ``NaN``. The ``NaN`` versions of functions don’t ignore infinite values (the result is positive or negative ``inf``), so it can be good to check for them:: >>> ds.Floats.nansum() inf For Datasets, ``mask_and_isnan()`` and ``mask_or_isnan()`` each return a FastArray of Booleans with a value for each row. 
``mask_and_isnan()`` returns True for each row in which every value is ``NaN``:: >>> ds.mask_and_isnan() FastArray([ True, False, False]) ``mask_or_isnan()`` returns True for each row in which at least one value is ``NaN``:: ds.mask_or_isnan() FastArray([ True, False, False]) Merging with Missing Values --------------------------- Missing values are not equivalent:: >>> rt.nan == rt.nan False This is true for integer invalid values, string invalid values, filtered values of a Categorical, etc. That means that merge functions do not treat invalid keys as equal values. For example, these two Datasets each have an invalid floating-point value in the Key column:: >>> ds1 = rt.Dataset({'Key': [1.0, rt.nan, 2.0], ... 'Value1': ['a', 'b', 'c']}) >>> ds2 = rt.Dataset({'Key': [1.0, 2.0, rt.nan], ... 'Value2': [1, 2, 3]}) Now we do a ``merge_lookup()`` on the Key columns:: >>> ds1.merge_lookup(ds2, on='Key') # Key Value1 Value2 - ---- ------ ------ 0 1.00 a 1 1 nan b Inv 2 2.00 c 2 The ``NaN`` key and its associated value in ``ds2`` were ignored, and the invalid integer value was filled in. Replacing Missing Values ------------------------ For both FastArrays and Datasets, calling ``fillna()`` with a constant is a quick way to replace missing values:: >>> ds.fillna(123) # Ints Floats FloatsPlus - ---- ------ ---------- 0 123 123.00 123.00 1 2 inf 3.00 2 3 2.70 5.40 Note that by default ``fillna()`` returns a copy; to modify the original data, use ``inplace=True``. For a little more nuance in how the gaps are filled, use ``fillna()`` with ``method='ffill'`` or ``method='bfill'``. 
``fillna(method='ffill')`` propagates non-``NaN`` values forward:: >>> rt.FA([1.0, 2.0, np.nan, 4.0, 5.0]).fillna(method='ffill') FastArray([1., 2., 2., 4., 5.]) ``fillna(method='bfill')`` propagates non-NaN values backward:: >>> rt.FA([1.0, 2.0, np.nan, 4.0, 5.0]).fillna(method='bfill') FastArray([1., 2., 4., 4., 5.]) For Categoricals, ``fill_forward()`` and ``fill_backward()`` propagate values within categories:: >>> # Create a Categorical with a NaN in each category >>> ds = rt.Dataset() >>> ds.Cat = rt.Cat(['A', 'B', 'A', 'B', 'A', 'B']) >>> ds.x = rt.FA([1, 4, rt.nan, rt.nan, 9, 16]) >>> ds # Cat x - --- ----- 0 A 1.00 1 B 4.00 2 A nan 3 B nan 4 A 9.00 5 B 16.00 Propagate forward the last encountered non-``NaN`` value for the category:: >>> ds.Cat.fill_forward(ds.x) *gb_key_0 x --------- ----- A 1.00 B 4.00 A 1.00 B 4.00 A 9.00 B 16.00 Note that until a reported bug is fixed, explicit column name declarations might not be displayed for grouping operations. Propagate backward the next encountered non-NaN value for the category:: >>> ds.Cat.fill_backward(ds.x) *gb_key_0 x --------- ----- A 1.00 B 4.00 A 9.00 B 16.00 A 9.00 B 16.00 Both ``fill_forward()`` and ``fill_backward()`` can take a list of arrays to fill, and both can modify data in place with ``inplace=True``. Note that if there is no value available to propagate forward or backward, the ``NaN`` value isn’t changed:: >>> ds.x[1] = rt.nan >>> ds.Cat.fill_forward(ds.x) *gb_key_0 x --------- ----- A 1.00 B nan A 1.00 B nan A 9.00 B 16.00 Convert Missing Values to/from Pandas ------------------------------------- This section covers some things to be aware of when you convert data with missing values between Pandas and Riptable. Note that while you can convert Pandas DataFrames to Riptable Datasets using Riptable’s Dataset constructor, you should use the Dataset methods ``to_pandas`` and ``from_pandas`` to convert data with missing values. 
Converting Floats ~~~~~~~~~~~~~~~~~ To represent missing floating-point values, both Pandas and Riptable use the special floating-point ``NaN`` value that’s part of the IEEE standard (though in Riptable, it’s displayed as ``nan``). Converting floating-point ``NaN`` values between Pandas and Riptable poses no issues:: >>> df = pd.DataFrame({'A': [0.0, np.nan, 1.0]}) >>> ds = rt.Dataset.from_pandas(df) >>> ds # A - ---- 0 0.00 1 nan 2 1.00 >>> df_again = ds.to_pandas() >>> df_again A 0 0.0 1 NaN 2 1.0 Converting Integers ~~~~~~~~~~~~~~~~~~~ Converting integers gets more interesting. Pandas has a new nullable integer data type (Int64, not to be confused with NumPy’s int64 dtype). A missing value in an Int64 column is represented by the native ``pd.NA`` value and displayed as ``<NA>``. Before this new dtype was created, the only numeric ``NaN`` used by Pandas was a floating-point ``NaN``, so any ``NaN`` value added to an integer array in Pandas would cause the array to become an array of floating-point numbers:: >>> s1 = pd.Series([1, 2, 3, 4, 5]) >>> s1[1] = np.nan >>> s1 0 1.0 1 NaN 2 3.0 3 4.0 4 5.0 dtype: float64 Since this is now just a column of floats, converting it to Riptable is just as shown above. Now, in Pandas, you can specify the new Int64 dtype (it’s not yet used by default). Missing values are represented by ``pd.NA``, displayed as ``<NA>``:: >>> s2 = pd.Series([1, 2, 3, 4, 5], dtype='Int64') >>> s2[1] = np.nan >>> s2 0 1 1 <NA> 2 3 3 4 4 5 dtype: Int64 When we convert these to Riptable, the Int64 ``<NA>`` remains an integer (but now the int64 dtype):: >>> # Create a DataFrame with the series from above. >>> df = pd.DataFrame({'Float': s1, 'Int64': s2}) >>> # Convert the DataFrame to a Riptable Dataset and display its dtypes. 
>>> ds2 = rt.Dataset.from_pandas(df) >>> ds2.dtypes {'Float': dtype('float64'), 'Int64': dtype('int64')} When you convert data with missing integer values from Riptable to Pandas, by default ``to_pandas()`` converts to the new Int64 dtype:: >>> df_again2 = ds2.to_pandas() >>> df_again2.dtypes Float float64 Int64 Int64 dtype: object You can choose to not convert to the new nullable dtype, but your integers might not be very useful:: >>> df_again3 = ds2.to_pandas(use_nullable=False) >>> df_again3 Float Int64 0 1.0 1 1 NaN -9223372036854775808 2 3.0 3 3 4.0 4 4 5.0 5 Converting Datetimes ~~~~~~~~~~~~~~~~~~~~ In Pandas, missing datetime values are represented as ``NaT``. When those are converted to Riptable, they become an ``Inv``:: >>> date_arr = pd.Series(pd.to_datetime(['01/01/2022', '02/01/2022', np.nan])) >>> df2 = pd.DataFrame({'Timestamp': date_arr}) >>> ds3 = rt.Dataset.from_pandas(df2) >>> ds3 # Timestamp - --------------------------- 0 20220101 00:00:00.000000000 1 20220201 00:00:00.000000000 2 Inv The missing value becomes ``NaT`` again when converted back to Pandas:: >>> df_again3 = ds3.to_pandas() >>> df_again3 Timestamp 0 2022-01-01 00:00:00+00:00 1 2022-02-01 00:00:00+00:00 2 NaT Converting Missing Booleans and Strings from Pandas to Riptable ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: >>> str_arr = pd.Series(["aaa", "bbb"]) >>> bool_arr = pd.Series([True, False]) >>> df = pd.DataFrame({"Strings": str_arr, "Bools": bool_arr}) >>> df2 = df.reindex({0, 1, 2}) # Add a row of missing values >>> df2 Strings Bools 0 aaa True 1 bbb False 2 NaN NaN When we convert Pandas ``NaN`` strings and Booleans to Riptable, the results are perhaps not quite what we expect:: >>> ds = rt.Dataset.from_pandas(df2) >>> ds # Strings Bools - ------- ----- 0 aaa 1.00 1 bbb 0.00 2 nan nan As you can see, the Boolean column became a column of floating-point values with an ``rt.nan``. 
If we try to recast the values, we get an unexpected result:: >>> ds.Bools = ds.Bools.astype(bool) >>> ds # Strings Bools - ------- ----- 0 aaa True 1 bbb False 2 nan True As for the “nan” in the Strings column, it is a string literal:: >>> ds.Strings FastArray([b'aaa', b'bbb', b'nan'], dtype='|S3') One way to avoid getting the string literal is to replace the missing value in Pandas (with a space, for example). Another way to deal with these values is to create a Boolean column that’s True if the Pandas object is a ``NaN``, then use that column as a mask array. **Riptable NaN values** - Int: -MAXINT (signed), MAXINT (unsigned) - Float: nan - String: b’’ - Bool: False - Date (stored as int): -MAXINT - DTN (stored as int): -MAXINT - TS (stored as float): nan Next we cover a few ways to `Instantiate with Placeholder Values and Generate Sample Data <tutorial_sample_data.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_missing_data.rst
0.930891
0.956917
tutorial_missing_data.rst
pypi
Work with Riptable Files and Other File Formats =============================================== SDS is Riptable’s native file format, and it’s the only data format fully supported directly within Riptable. That said, there are ways to get data that’s in other formats in and out of Riptable. SDS --- We’ll start with the most straightforward case – saving and loading SDS files. You can save Datasets, FastArrays, or Structs. Create a Dataset:: >>> ds = rt.Dataset({'Ints': rt.arange(10, dtype=int), 'Floats': rt.arange(1, step=0.1), ... 'Categoricals': rt.Categorical(['a','a','b','a','c','c','b','a','a','b'])}) >>> ds # Ints Floats Categoricals - ---- ------ ------------ 0 0 0.00 a 1 1 0.10 a 2 2 0.20 b 3 3 0.30 a 4 4 0.40 c 5 5 0.50 c 6 6 0.60 b 7 7 0.70 a 8 8 0.80 a 9 9 0.90 b Save the Dataset:: >>> ds.save('ds.sds') Load the Dataset:: >>> ds_load_ds = rt.load_sds('ds.sds') >>> ds_load_ds # Ints Floats Categoricals - ---- ------ ------------ 0 0 0.00 a 1 1 0.10 a 2 2 0.20 b 3 3 0.30 a 4 4 0.40 c 5 5 0.50 c 6 6 0.60 b 7 7 0.70 a 8 8 0.80 a 9 9 0.90 b Load a subset of columns:: >>> rt.load_sds('ds.sds', include=['Ints', 'Categoricals']) # Ints Categoricals - ---- ------------ 0 0 a 1 1 a 2 2 b 3 3 a 4 4 c 5 5 c 6 6 b 7 7 a 8 8 a 9 9 b Create a FastArray:: >>> fa = rt.FastArray(np.arange(10)) >>> fa FastArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) Save the FastArray:: >>> fa.save('fa.sds') Load the FastArray:: >>> fa_load_sds = rt.load_sds('fa.sds') >>> fa_load_sds FastArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32) Warning: Multi-key Categoricals can’t be saved in SDS files. When you try to load the SDS file, it fails with an error: “Categories dict was empty.” CSV Files --------- Saving to CSV isn’t supported by Riptable, but you can do it by first converting your Riptable Dataset to a Pandas DataFrame, then calling the Pandas ``to_csv()`` method. Later, you can load your CSV file into Riptable as a Dataset. 
Note that Categorical information will be lost in the ``to_csv()`` process. When you load the CSV file into Riptable as a Dataset, any Categorical column will be a FastArray. You can always change the FastArray back into a Categorical in Riptable. The ``index`` parameter for the ``to_csv()`` method indicates whether you want to write row (index) names. Because Riptable doesn’t use explicit row indexing, set ``index=False``. Convert the Dataset to a Pandas DataFrame, then save the DataFrame as a CSV:: >>> ds.to_pandas().to_csv('ds.csv', index=False) Read the CSV a into Pandas DataFrame, then convert the DataFrame to a Riptable Dataset using the Dataset constructor:: >>> ds_from_csv = rt.Dataset(pd.read_csv('ds.csv')) >>> ds_from_csv # Ints Floats Categoricals - ---- ------ ------------ 0 0 0.00 a 1 1 0.10 a 2 2 0.20 b 3 3 0.30 a 4 4 0.40 c 5 5 0.50 c 6 6 0.60 b 7 7 0.70 a 8 8 0.80 a 9 9 0.90 b As you can see, the Categorical is now a FastArray:: >>> ds_from_csv.Categoricals FastArray([b'a', b'a', b'b', b'a', b'c', b'c', b'b', b'a', b'a', b'b'], dtype='|S1') But we can change it back:: >>> ds_from_csv.Categoricals = rt.Cat(ds_from_csv.Categoricals) >>> ds_from_csv.Categoricals Categorical([a, a, b, a, c, c, b, a, a, b]) Length: 10 FastArray([1, 1, 2, 1, 3, 3, 2, 1, 1, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 SQL Files --------- Working with SQL files and Riptable is much like working with CSV files and Riptable. To save a Riptable Dataset to SQL format, first convert the Dataset to a Pandas DataFrame, then use the Pandas ``to_SQL()`` method to save it. To get the file back into Riptable, first load it in Pandas as a DataFrame using ``read_csv()``, then convert it to a Riptable Dataset. H5 Files -------- H5 files can be loaded in Riptable using ``rt.load_h5()``. To save your data as an H5 file, convert to Pandas and use the Pandas ``to_h5()`` method. 
NPY Files --------- Like Pandas, NumPy has various IO tools for saving and loading data. See the `NumPy docs <https://numpy.org/doc/stable/user/basics.io.html?highlight=import>`__ for details. Note that Riptable can initialize Datasets only from NumPy arrays that are record arrays. Convert data for Use in Other Libraries --------------------------------------- Sometimes, you need to access a function available only in NumPy or Pandas. Here’s how to convert a Riptable data structure to its equivalent in NumPy or Pandas, and then back to Riptable. Riptable FastArray to/from NumPy Array ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we first introduced FastArrays, we created one from a NumPy array:: >>> my_fa = rt.FA(np.array([0.1, 0.2, 0.3])) To access a FastArray’s underlying NumPy array, use ``_np``:: >>> np_arr = my_fa._np >>> np_arr array([0.1, 0.2, 0.3]) This is the same result you’d get in Pandas by calling ``Series.values``. Riptable Dataset to/from NumPy Array ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Converting a Dataset to a 2-dimensional NumPy array is a two-step process. First, use ``imatrix_make()`` to convert the Dataset to a 2-dimensional FastArray (``imatrix_make()`` saves only the values – your column names will be lost). FastArrays above 1-d are not technically supported by Riptable, so don’t stop here! Convert the FastArray to a NumPy array with ``._np``:: >>> ds1 = rt.Dataset({'A':[0,6,9], 'B': [1.2,3.1,9.6], 'C':[-1.6,2.7,4.6], 'D': [2.4,6.2,19.2]}) >>> np_2d_arr = ds1.imatrix_make()._np >>> np_2d_arr array([[ 0. , 1.2, -1.6, 2.4], [ 6. , 3.1, 2.7, 6.2], [ 9. , 9.6, 4.6, 19.2]]) A few things to note about ``imatrix_make()``: - As noted above, imatrix_make saves only column values, not column names. - Non-numerical columns are ignored. - You can specify which columns to convert: ``ds1[['A', 'B']].imatrix_make()._np`` - Watch out for integer columns! 
Since NumPy arrays can’t have mixed types, if your ``imatrix_make`` input contains any float columns, the entire array will be converted to floats. It’s also possible that the integers in your original Dataset will be converted. - Also watch out for NaNs in integer columns (“Inv”). “Inv” is stored internally by Riptable as an out-of-bounds number, and it will be sent to NumPy as that number. See `Working with Missing Data <tutorial_missing_data.rst>`__ for more on dealing with NaNs. - If there are Categoricals in the Dataset, you can preserve the integer mapping codes by passing ``cats=True``. To convert a 2-dimensional NumPy array back to Riptable, add it to a Dataset using ``add_matrix()``:: >>> ds2 = rt.Dataset() >>> ds2.add_matrix(np_2d_arr) >>> ds2 # col_0 col_1 col_2 col_3 - ----- ----- ----- ----- 0 0.00 1.20 -1.60 2.40 1 6.00 3.10 2.70 6.20 2 9.00 9.60 4.60 19.20 To add it with rows and columns transposed:: >>> ds3 = rt.Dataset() >>> ds3.add_matrix(np_2d_arr.T) >>> ds3 C:\\riptable\\rt_fastarray.py:561: UserWarning: FastArray initialized with strides. warnings.warn(warning_string) # col_0 col_1 col_2 - ----- ----- ----- 0 0.00 6.00 9.00 1 1.20 3.10 9.60 2 -1.60 2.70 4.60 3 2.40 6.20 19.20 Riptable Dataset to/from Pandas DataFrame ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally, you can use ``from_pandas()`` and ``to_pandas()`` to convert a Pandas DataFrame to a Riptable Dataset and vice-versa. We’ll create a Pandas DataFrame with categorical, timestamp, float and integer columns. We won’t deal with NaN values here – see `Working with Missing Data <tutorial_missing_data.rst>`__ for guidance:: >>> rng = np.random.default_rng(seed=42) >>> N = 10 >>> dates = pd.date_range('20191111','20191119') >>> df = pd.DataFrame( ... dict(Time = rng.choice(dates, N), ... Symbol = pd.Categorical(rng.choice(['SPY','IBM'], N)), ... Exchange = pd.Categorical(rng.choice(['AMEX','NYSE'], N)), ... TradeSize = rng.choice([1,5,10], N), ... 
TradePrice = rng.choice([1.1,2.2,3.3], N), ... ) ... ) >>> df Time Symbol Exchange TradeSize TradePrice 0 2019-11-11 IBM NYSE 5 1.1 1 2019-11-17 IBM AMEX 1 3.3 2 2019-11-16 IBM AMEX 1 3.3 3 2019-11-14 IBM NYSE 5 2.2 4 2019-11-14 IBM NYSE 10 1.1 5 2019-11-18 IBM NYSE 1 3.3 6 2019-11-11 IBM AMEX 10 2.2 7 2019-11-17 SPY NYSE 10 3.3 8 2019-11-12 IBM NYSE 1 3.3 9 2019-11-11 SPY AMEX 5 3.3 The DataFrame dtypes before conversion:: >>> df.dtypes Time datetime64[ns] Symbol category Exchange category TradeSize int32 TradePrice float64 dtype: object Use ``from_pandas()`` to convert to a Dataset:: >>> ds = rt.Dataset.from_pandas(df) >>> ds.head(5) # Time Symbol Exchange TradeSize TradePrice - --------------------------- ------ -------- --------- ---------- 0 20191111 00:00:00.000000000 IBM NYSE 5 1.10 1 20191117 00:00:00.000000000 IBM AMEX 1 3.30 2 20191116 00:00:00.000000000 IBM AMEX 1 3.30 3 20191114 00:00:00.000000000 IBM NYSE 5 2.20 4 20191114 00:00:00.000000000 IBM NYSE 10 1.10 Note: You can also convert a Pandas DataFrame in the Dataset constructor, but only if the DataFrame has no null values:: >>> ds = rt.Dataset(df) If we check the Dataset dtypes after conversion, we see only the underlying NumPy data type:: >>> ds.dtypes {'Time': dtype('int64'), 'Symbol': dtype('int8'), 'Exchange': dtype('int8'), 'TradeSize': dtype('int32'), 'TradePrice': dtype('float64')} To see the Riptable column types, we’ll use a Python list comprehension:: >>> {(c,ds[c].dtype ,type(ds[c])) for c in ds.keys()} {('Exchange', dtype('int8'), riptable.rt_categorical.Categorical), ('Symbol', dtype('int8'), riptable.rt_categorical.Categorical), ('Time', dtype('int64'), riptable.rt_datetime.DateTimeNano), ('TradePrice', dtype('float64'), riptable.rt_fastarray.FastArray), ('TradeSize', dtype('int32'), riptable.rt_fastarray.FastArray)} Use ``to_pandas()`` to convert the Dataset back to a Pandas DataFrame:: >>> df1 = ds.to_pandas() >>> df1.dtypes Time datetime64[ns, GMT] Symbol category Exchange category 
TradeSize Int32 TradePrice float64 dtype: object Convert Dates to/from Matlab (and Other Libraries) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use Matlab (or another library) to visualize data by date, convert the Riptable Date objects to an array of integers:: >>> dates = rt.Date(ds.Time) >>> int_dates = dates.yyyymmdd >>> int_dates.dtype dtype('int32') MATLAB stores dates as days since 0000-01-01. To convert an array of Matlab datenums to a Riptable ``Date`` object, first convert the datenums to a FastArray, then to a Date object using the ``from_matlab`` keyword argument:: >>> dates = rt.FA([737061.0, 737062.0, 737063.0, 737064.0, 737065.0]) >>> rt_dates = rt.Date(dates, from_matlab=True) >>> rt_dates Date(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05']) Next, we review some things to keep in mind to get the best performance out of Riptable: `Performance Considerations <tutorial_performance.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_io.rst
0.70028
0.690996
tutorial_io.rst
pypi
Accums ====== Accums aggregate data similarly to Categoricals, but they distinguish themselves by providing a fancier output with overall aggregates in summary footers and columns. ``Accum2()`` ------------ ``Accum2()`` is very much like a multi-key Categorical: It computes aggregates values for pairs of groups. The difference is in the output – an ``Accum2()`` result looks more like a pivot table, with the first group passed to the function providing row labels and the second providing the column labels. The function is also applied to each row and column, with results shown in a summary column and row, as well as to all columns and rows combined (with the result shown in the bottom right corner). We’ll use a Dataset that’s similar to the one we used for multi-key Categoricals, so we can compare the output:: >>> rng = np.random.default_rng(seed=42) >>> ds = rt.Dataset() >>> N = 100 >>> ds.Symbol = rt.FA(rng.choice(['AAPL', 'AMZN', 'MSFT'], N)) >>> ds.Value = rt.FA(rng.random(N)) >>> ds.Date = rt.Date.range('20210101', days = 100) # Dates from January to mid-April >>> ds.Month = ds.Date.start_of_month >>> # Accum2 can take Categoricals or FastArrays as input. >>> # To use this ds for accum_ratio, we need Symbol and Month to be Categoricals. >>> ds.Symbol = rt.Cat(ds.Symbol) >>> ds.Month = rt.Cat(ds.Month) >>> ds # Symbol Value Date Month --- ------ ----- ---------- ---------- 0 AAPL 0.20 2021-01-01 2021-01-01 1 MSFT 0.01 2021-01-02 2021-01-01 2 AMZN 0.79 2021-01-03 2021-01-01 3 AMZN 0.66 2021-01-04 2021-01-01 4 AMZN 0.71 2021-01-05 2021-01-01 5 MSFT 0.78 2021-01-06 2021-01-01 6 AAPL 0.46 2021-01-07 2021-01-01 7 MSFT 0.57 2021-01-08 2021-01-01 8 AAPL 0.14 2021-01-09 2021-01-01 9 AAPL 0.11 2021-01-10 2021-01-01 10 AMZN 0.67 2021-01-11 2021-01-01 11 MSFT 0.47 2021-01-12 2021-01-01 12 MSFT 0.57 2021-01-13 2021-01-01 13 MSFT 0.76 2021-01-14 2021-01-01 14 MSFT 0.63 2021-01-15 2021-01-01 ... ... ... ... ... 
85 MSFT 0.02 2021-03-27 2021-03-01 86 AAPL 0.96 2021-03-28 2021-03-01 87 AAPL 0.48 2021-03-29 2021-03-01 88 MSFT 0.78 2021-03-30 2021-03-01 89 MSFT 0.08 2021-03-31 2021-03-01 90 AMZN 0.49 2021-04-01 2021-04-01 91 MSFT 0.49 2021-04-02 2021-04-01 92 MSFT 0.94 2021-04-03 2021-04-01 93 AMZN 0.57 2021-04-04 2021-04-01 94 MSFT 0.47 2021-04-05 2021-04-01 95 AAPL 0.27 2021-04-06 2021-04-01 96 AAPL 0.33 2021-04-07 2021-04-01 97 MSFT 0.52 2021-04-08 2021-04-01 98 AMZN 0.44 2021-04-09 2021-04-01 99 AAPL 0.02 2021-04-10 2021-04-01 Here’s the ``Accum2()`` table before we apply an aggregation function. You can see how many values fall into each group pair:: >>> rt.Accum2(ds.Symbol, ds.Month) Accum2 Keys X:Date(['2021-01-01', '2021-02-01', '2021-03-01', '2021-04-01']) Y:FastArray([b'AAPL', b'AMZN', b'MSFT'], dtype='|S4') Bins:20 Rows:100 *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Sum ------- ---------- ---------- ---------- ---------- --- AAPL 6 9 9 3 27 AMZN 13 8 9 3 33 MSFT 12 11 13 4 40 If we aggregate with ``count()``, it has the same data, but we see the output formatting:: >>> rt.Accum2(ds.Symbol, ds.Month).count() *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Sum ------- ---------- ---------- ---------- ---------- --- AAPL 6 9 9 3 27 AMZN 13 8 9 3 33 MSFT 12 11 13 4 40 Sum 31 28 31 10 100 The bottom row and rightmost column provide summary data. Now we’ll get the average value per symbol-month pair:: >>> rt.Accum2(ds.Symbol, ds.Month).mean(ds.Value) *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Mean ------- ---------- ---------- ---------- ---------- ---- AAPL 0.35 0.40 0.54 0.21 0.41 AMZN 0.54 0.48 0.45 0.50 0.50 MSFT 0.44 0.47 0.42 0.61 0.46 Mean 0.47 0.45 0.46 0.45 0.46 Note that the summary row and column show the mean values for all the input values for each group, not just the means of the displayed group means. 
To illustrate: Here’s the mean of the displayed group mean values for AAPL:: >>> (0.35 + 0.40 + 0.54 + 0.21) / 4 0.375 And here’s the mean of all AAPL values:: >>> ds.Value.nanmean(filter=ds.Symbol == 'AAPL') 0.41317486824408933 For comparison, here’s the multi-key Categorical version:: >>> ds.Symbol_Month = rt.Cat([ds.Symbol, ds.Month]) >>> ds.Symbol_Month.mean(ds.Value) *Symbol *Month Value ------- ---------- ----- AAPL 2021-01-01 0.35 MSFT 2021-01-01 0.44 AMZN 2021-01-01 0.54 AAPL 2021-02-01 0.40 AMZN 2021-02-01 0.48 MSFT 2021-02-01 0.47 . 2021-03-01 0.42 AMZN 2021-03-01 0.45 AAPL 2021-03-01 0.54 AMZN 2021-04-01 0.50 MSFT 2021-04-01 0.61 AAPL 2021-04-01 0.21 You can pass a filter keyword argument to the function you call on ``Accum2()``:: >>> rt.Accum2(ds.Symbol, ds.Month).mean(ds.Value, filter=ds.Value > 0.5) *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Mean ------- ---------- ---------- ---------- ---------- ---- AAPL 0.85 0.74 0.76 nan 0.77 AMZN 0.67 0.75 0.72 0.57 0.69 MSFT 0.65 0.78 0.70 0.73 0.71 Mean 0.67 0.76 0.72 0.68 0.71 ``accum_ratio()`` ----------------- For each group pair, ``accum_ratio()`` computes a ratio of values you specify. The results are presented in an Accum table. For our example we’ll add PnL and Size (number of sales) columns, and we’ll use ``accum_ratio()`` to get the PnL for each symbol-month bucket, weighted by size:: >>> ds.PnL = rng.normal(10, 20, 100) >>> ds.Size = rng.random(100) * 100 Like ``Accum2()``, ``accum_ratio()`` takes two Categoricals (a row Categorical and a column Categorical). You also specify the numerator values and denominator values. 
For each group pair, it sums the numerator values and denominator values and presents the ratios in a table:: >>> rt.accum_ratio(ds.Symbol, ds.Month, ds.PnL * ds.Size, ds.Size, include_numer=True) *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Ratio Numer Denom ------- ---------- ---------- ---------- ---------- ----- --------- -------- AAPL 3.13 11.93 1.95 28.86 8.81 12,363.71 1,404.13 AMZN 5.54 2.36 23.34 -2.94 10.01 16,971.55 1,695.67 MSFT 23.90 22.78 -1.40 -9.61 10.35 17,501.11 1,690.46 Ratio 10.13 13.17 7.31 8.25 9.78 Numer 10,604.13 18,953.08 13,471.17 3,807.98 46,836.36 Denom 1,047.18 1,438.84 1,842.65 461.59 4,790.26 The result is the ratio of the following two tables. Numerator:: >>> rt.Accum2(ds.Symbol, ds.Month).nansum(ds.Size * ds.PnL) *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Nansum ------- ---------- ---------- ---------- ---------- --------- AAPL 699.07 5,075.98 1,100.76 5,487.90 12,363.71 AMZN 2,956.74 1,065.03 13,358.59 -408.81 16,971.55 MSFT 6,948.32 12,812.08 -988.18 -1,271.11 17,501.11 Nansum 10,604.13 18,953.08 13,471.17 3,807.98 46,836.36 Denominator:: >>> rt.Accum2(ds.Symbol, ds.Month).nansum(ds.Size) *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Nansum ------- ---------- ---------- ---------- ---------- -------- AAPL 223.12 425.49 565.38 190.13 1,404.13 AMZN 533.28 450.83 572.34 139.22 1,695.67 MSFT 290.78 562.52 704.92 132.24 1,690.46 Nansum 1,047.18 1,438.84 1,842.65 461.59 4,790.26 When the numerator and denominator are the same, the result is as you might expect:: >>> rt.accum_ratio(ds.Symbol, ds.Month, ds.Size, ds.Size, include_numer=True) *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 Ratio Numer Denom ------- ---------- ---------- ---------- ---------- ----- -------- -------- AAPL 1.00 1.00 1.00 1.00 1.00 1,404.13 1,404.13 AMZN 1.00 1.00 1.00 1.00 1.00 1,695.67 1,695.67 MSFT 1.00 1.00 1.00 1.00 1.00 1,690.46 1,690.46 Ratio 1.00 1.00 1.00 1.00 1.00 Numer 1,047.18 1,438.84 1,842.65 461.59 4,790.26 Denom 
1,047.18 1,438.84 1,842.65 461.59 4,790.26 ``accum_ratiop()`` ------------------ ``accum_ratiop()`` takes one column of values as numerators and computes an internal ratio for each group pair, where the denominator is one of three sums: - The row sum (``norm_by='R'``) - The column sum (``norm_by='C'``) - The total sum (``norm_by='T'``) For example, this table shows that 30.30% of AAPL sales were in February:: >>> rt.accum_ratiop(ds.Symbol, ds.Month, ds.Size, norm_by='R') *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 TotalRatio Total ---------- ---------- ---------- ---------- ---------- ---------- -------- AAPL 15.89 30.30 40.27 13.54 100.00 1,404.13 AMZN 31.45 26.59 33.75 8.21 100.00 1,695.67 MSFT 17.20 33.28 41.70 7.82 100.00 1,690.46 TotalRatio 21.86 30.04 38.47 9.64 100.00 Total 1,047.18 1,438.84 1,842.65 461.59 4,790.26 Note that the percentages in each row sum to 100%. We can check the math by computing the ratio of AAPL’s February sales to AAPL’s total sales:: >>> filt_feb_aapl = (ds.Symbol == 'AAPL') & (ds.Month.as_string_array == rt.Date('20210201')) >>> filt_total_aapl = ds.Symbol == 'AAPL' >>> ds.Size[filt_feb_aapl].nansum() / ds.Size[filt_total_aapl].nansum() 0.3030291108538412 This table shows that AAPL’s sales are 29.57% of February sales:: >>> rt.accum_ratiop(ds.Symbol, ds.Month, ds.Size, norm_by='C') *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 TotalRatio Total ---------- ---------- ---------- ---------- ---------- ---------- -------- AAPL 21.31 29.57 30.68 41.19 29.31 1,404.13 AMZN 50.93 31.33 31.06 30.16 35.40 1,695.67 MSFT 27.77 39.10 38.26 28.65 35.29 1,690.46 TotalRatio 100.00 100.00 100.00 100.00 100.00 Total 1,047.18 1,438.84 1,842.65 461.59 4,790.26 Note that the percentages in each column sum to 100%. 
Check the math:: >>> filt_feb_total = ds.Month.as_string_array == rt.Date('20210201') >>> ds.Size[filt_feb_aapl].nansum() / ds.Size[filt_feb_total].nansum() 0.29571866540362846 This table shows that AAPL’s February sales represent 8.88% of all sales:: >>> rt.accum_ratiop(ds.Symbol, ds.Month, ds.Size, norm_by='T') *Symbol 2021-01-01 2021-02-01 2021-03-01 2021-04-01 TotalRatio Total ---------- ---------- ---------- ---------- ---------- ---------- -------- AAPL 4.66 8.88 11.80 3.97 29.31 1,404.13 AMZN 11.13 9.41 11.95 2.91 35.40 1,695.67 MSFT 6.07 11.74 14.72 2.76 35.29 1,690.46 TotalRatio 21.86 30.04 38.47 9.64 100.00 Total 1,047.18 1,438.84 1,842.65 461.59 4,790.26 Note that the “TotalRatio” row and column percentages each sum to 100%. Check the math:: >>> ds.Size[filt_feb_aapl].nansum() / ds.Size.nansum() 0.08882445025331744 Next, for something completely different, we’ll explore ways to `Concatenate Datasets <tutorial_concat.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_accums.rst
0.904102
0.721081
tutorial_accums.rst
pypi
Concatenate Datasets ==================== Concatentating Datasets is straightforward, with two Dataset methods: You can concatenate rows (vertically) with ``concat_rows()`` or columns (horizontally) with ``concat_columns()``. However, it’s good to be aware of what happens when you concatenate two Datasets that have different shapes or column names, so we’ll look at a few examples. Concatenate Rows ---------------- Concatenating data row-wise is sometimes called vertical stacking. When two Datasets have identical column names, ``concat_rows()`` simply stacks the data:: >>> ds1 = rt.Dataset({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}) >>> ds2 = rt.Dataset({'A': ['A3', 'A4', 'A5'], 'B': ['B3', 'B4', 'B5']}) >>> ds1 # A B - -- -- 0 A0 B0 1 A1 B1 2 A2 B2 >>> ds2 # A B - -- -- 0 A3 B3 1 A4 B4 2 A5 B5 >>> rt.Dataset.concat_rows([ds1, ds2]) # A B - -- -- 0 A0 B0 1 A1 B1 2 A2 B2 3 A3 B3 4 A4 B4 5 A5 B5 When the two Datasets have only some column names in common, the result has a gap in the data:: >>> # Create two Datasets with two out of three columns in common. >>> ds3 = rt.Dataset({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}) >>> ds4 = rt.Dataset({'A': ['A3', 'A4', 'A5'], 'B': ['B3', 'B4', 'B5'], 'C': ['C3', 'C4', 'C5'] }) >>> ds3 # A B - -- -- 0 A0 B0 1 A1 B1 2 A2 B2 >>> ds4 # A B C - -- -- -- 0 A3 B3 C3 1 A4 B4 C4 2 A5 B5 C5 >>> rt.Dataset.concat_rows([ds3, ds4]) # A B C - -- -- -- 0 A0 B0 1 A1 B1 2 A2 B2 3 A3 B3 C3 4 A4 B4 C4 5 A5 B5 C5 As you can see, Riptable’s missing string value is ’’. 
If the values were floats, the empty spots would be filled with ``nan``\ s:: >>> rng = np.random.default_rng(seed=42) >>> ds5 = rt.Dataset({'col_'+str(i):rng.random(2) for i in range(2)}) >>> ds6 = rt.Dataset({'col_'+str(i):rng.random(2) for i in range(3)}) >>> ds5 # col_0 col_1 - ----- ----- 0 0.77 0.86 1 0.44 0.70 >>> ds6 # col_0 col_1 col_2 - ----- ----- ----- 0 0.09 0.76 0.13 1 0.98 0.79 0.45 >>> rt.Dataset.concat_rows([ds5, ds6]) # col_0 col_1 col_2 - ----- ----- ----- 0 0.77 0.86 nan 1 0.44 0.70 nan 2 0.09 0.76 0.13 3 0.98 0.79 0.45 See `Working with Missing Data <tutorial_missing_data.rst>`__ for more about what to expect when you have missing values in Riptable. You can also concatenate datasets row-wise with Categoricals if the Datasets have identical column names:: >>> a = rt.Cat(['a', 'a', 'a', 'b', 'b']) >>> b = rt.FA([0, 1, 2, 3, 4]) >>> ds10 = rt.Dataset({'Cat': a, 'Val': b}) >>> c = rt.Cat(['c', 'c', 'c', 'd', 'd']) >>> d = rt.FA([5, 6, 7, 8, 9]) >>> ds11 = rt.Dataset({'Cat': c, 'Val': d}) >>> ds10 # Cat Val - --- --- 0 a 0 1 a 1 2 a 2 3 b 3 4 b 4 >>> ds11 # Cat Val - --- --- 0 c 5 1 c 6 2 c 7 3 d 8 4 d 9 >>> rt.Dataset.concat_rows([ds10, ds11]) # Cat Val - --- --- 0 a 0 1 a 1 2 a 2 3 b 3 4 b 4 5 c 5 6 c 6 7 c 7 8 d 8 9 d 9 Concatenate Columns ------------------- Concatenating data column-wise is also called horizontal stacking. It’s most straightforward when you’re concatenating two Datasets that have no column names in common:: >>> ds7 = rt.Dataset({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}) >>> ds8 = rt.Dataset({'C': ['C0', 'C1', 'C2'], 'D': ['D0', 'D1', 'D2']}) >>> ds7 # A B - -- -- 0 A0 B0 1 A1 B1 2 A2 B2 >>> ds8 # C D - -- -- 0 C0 D0 1 C1 D1 2 C2 D2 >>> ds9 = rt.Dataset.concat_columns([ds7, ds8], do_copy=True) >>> ds9 # A B C D - -- -- -- -- 0 A0 B0 C0 D0 1 A1 B1 C1 D1 2 A2 B2 C2 D2 Note that ``do_copy`` is a required argument for ``concat_columns()``. 
When ``do_copy=True``, changes you make to values in the original Datasets do not change the values in your new, concatenated Dataset, and vice-versa. When your two Datasets have a column name (or names) in common, you need to specify which data you want to keep – the data from the shared column(s) in first Dataset or the data from the shared column(s) in the second Dataset. We’ll give our second Dataset an ‘A’ column:: >>> ds8.A = rt.FA(['A3', 'A4', 'A5']) If you try to concatenate the two Datasets, you get an error:: >>> try: ... rt.Dataset.concat_columns([ds7, ds8], do_copy=True) ... except KeyError as e: ... print("KeyError:", e) KeyError: "Duplicate column 'A'" To keep the column data from the first Dataset, use ``on_duplicate='first'``. You’ll get a warning about mismatched column names, but the concatenation is performed:: >>> rt.Dataset.concat_columns([ds7, ds8], do_copy=True, on_duplicate='first') C:\\riptable\\rt_dataset.py:5628: UserWarning: concat_columns() duplicate column mismatch: {'A'} warnings.warn(f'concat_columns() duplicate column mismatch: {dups!r}') # A B C D - -- -- -- -- 0 A0 B0 C0 D0 1 A1 B1 C1 D1 2 A2 B2 C2 D2 You can turn off this warning by adding ``on_mismatch='ignore'``. To keep the column data from the second dataset, use ``on_duplicate='last'``:: >>> rt.Dataset.concat_columns([ds7, ds8], on_duplicate='last', do_copy=True) # A B C D - -- -- -- -- 0 A3 B0 C0 D0 1 A4 B1 C1 D1 2 A5 B2 C2 D2 Note: To concatenate Datasets column-wise, the columns must all be the same length – Riptable does not fill in missing column values the way it does missing row values:: >>> ds9 = rt.Dataset({'E': ['E0', 'E1']}) >>> try: ... rt.Dataset.concat_columns([ds8, ds9], do_copy=True) ... except ValueError as e: ... print("ValueError:", e) ValueError: Inconsistent Dataset lengths {2, 3} Concatenation is sufficient in certain situations, but it helps to have more flexibility to bring data from two Datasets together. 
Next, we’ll cover how to `Merge Datasets <tutorial_merge.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_concat.rst
0.915616
0.731299
tutorial_concat.rst
pypi
Instantiate with Placeholder Values and Generate Sample Data ============================================================ It’s useful to have a few tools in your back pocket for generating data quickly – either placeholder values (like 1s or 0s) meant to temporarily fill a certain structure you’re instantiating, or sample values that mimic real data, which you can use to explore and experiment with Riptable. Here’s a brief sampling of Riptable and NumPy methods you can use. For complete details about these functions, see their API reference documentation. Generate Placeholder Values ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following methods generate repeated 0s and 1s. 10 floating-point zeros:: >>> rt.zeros(10) FastArray([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) 10 integer zeros:: >>> rt.zeros(10, int) FastArray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) 10 floating-point ones:: >>> rt.ones(10) FastArray([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) 10 integer ones:: >>> rt.ones(10, int) FastArray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) The following methods generate repeated specified values. 10 fives:: >>> rt.repeat(5, 10) FastArray([5, 5, 5, 5, 5, 5, 5, 5, 5, 5]) 10 repeats of each array element:: >>> rt.repeat([1, 2], 10) FastArray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) 10 "tiles" of the entire array:: >>> rt.tile([1, 2], 10) FastArray([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]) 10 twos:: >>> rt.full(10, 2) FastArray([2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) Generate Sample Data ~~~~~~~~~~~~~~~~~~~~ Most of these methods generate a range of values. ``arange()`` generates evenly spaced floating-point or integer values (depending on the input) within a given interval, including the start value but excluding the stop value. You can also specify a step size (the spacing between the values; the default is 1). It’s like Python’s *range* function, but it returns a FastArray rather than a list. 
Numbers 0 through 9:: >>> rt.arange(10) FastArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) Every second number from 1 to 9:: >>> rt.arange(1, 10, 2) FastArray([1, 3, 5, 7, 9]) For evenly spaced values where the step is a non-integer, it’s better to use ``np.linspace``. Instead of specifying the step value, you specify the number of elements. Both the start and stop values are included:: >>> np.linspace(2.0, 3.0, num=5) # Five evenly spaced numbers from 2.0 to 3.0 (the step is 0.25) array([2. , 2.25, 2.5 , 2.75, 3. ]) For randomly generated values, you have several options. For integers and floating-point values, NumPy has you covered. Call ``default_rng`` to get a new instance of a NumPy Generator, then call its methods. To generate values that can be replicated, initialize with a seed value of your choice to initialize the BitGenerator:: >>> rng = np.random.default_rng(seed=42) 10 floating-point numbers between 0.0 and 1.0 (1.0 excluded):: >>> rng.random(10) array([0.77395605, 0.43887844, 0.85859792, 0.69736803, 0.09417735, 0.97562235, 0.7611397 , 0.78606431, 0.12811363, 0.45038594]) 10 uniformly distributed floats between 0 and 50 (50 excluded):: >>> rng.uniform(0, 50, 10) array([18.53990121, 46.33824944, 32.193256 , 41.13808066, 22.17070994, 11.36193609, 27.72923935, 3.19086281, 41.3815586 , 31.58321996]) 10 integers between 1 and 50 (50 excluded):: >>> rng.integers(1, 50, 10) array([ 9, 38, 35, 18, 4, 48, 22, 44, 34, 39], dtype=int64) 10 strings chosen from a list:: >>> rng.choice(['GME', 'AMZN', 'TSLA', 'SPY'], 10) array(['SPY', 'GME', 'AMZN', 'AMZN', 'AMZN', 'GME', 'TSLA', 'GME', 'TSLA', 'TSLA'], dtype='<U4') 10 random Booleans:: >>> rng.choice([True, False], 10) array([False, False, True, False, True, True, False, True, True, True]) See `NumPy’s documentation <https://numpy.org/doc/stable/user/index.html>`__ for more details and other methods. Riptable has methods for generating random Date and DateTimeNano arrays. 
5 DateTimeNanos with NYT time zone:: >>> rt.DateTimeNano.random(5) DateTimeNano(['20000507 22:02:14.350793900', '20040720 00:24:28.668289697', '19771017 22:34:39.521017110', '20130819 05:29:22.584265022', '20170622 00:50:06.970974486'], to_tz='NYC') Dates between a start date and an end date (start and end dates included; the default step is 1 day):: >>> rt.Date.range('20190201', '20190208') Date(['2019-02-01', '2019-02-02', '2019-02-03', '2019-02-04', '2019-02-05', '2019-02-06', '2019-02-07', '2019-02-08']) 5 dates, spaced two days apart, with a specified start date (start date included):: >>> rt.Date.range('20190201', days=5, step=2) Date(['2019-02-01', '2019-02-03', '2019-02-05', '2019-02-07', '2019-02-09']) Though ``Date`` objects don’t (yet) have a ``random`` method, you can use ``rng.choice`` to pick dates from a range:: >>> rt.Date(rng.choice(rt.Date.range('20220201', '20220430'), 5)) Date(['2022-04-12', '2022-02-17', '2022-03-14', '2022-02-12', '2022-04-03']) Next we cover ways to get data in and out of Riptable: `Work with Riptable Files and Other File Formats <tutorial_io.rst>`__. -------------- Questions or comments about this guide? Email RiptableDocumentation@sig.com.
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/tutorial_sample_data.rst
0.873296
0.911731
tutorial_sample_data.rst
pypi
Riptable Categoricals -- Indexing ********************************* Bracket indexing traverses the FastArray of indices/codes and returns the corresponding category. When a Categorical is indexed with a single integer, the corresponding category is returned as a unicode string. When multiple integers or a boolean array are used, a copy of the Categorical is returned that has the same categories as the original Categorical but with an index/code array limited to the selected elements. If you modify the returned subset, it won't affect the original Categorical. When a slice is used, the returned Categorical is a view, not a copy. If you modify the view, the original Categorical is also modified. To set a value to a new value, the new value must be already represented in the existing categories array. The following examples use this Categorical:: >>> c = rt.Categorical(["a", "a", "b", "a", "c", "c", "b"]) >>> c Categorical([a, a, b, a, c, c, b]) Length: 7 FastArray([1, 1, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Single integer -------------- Use bracket indexing to get a single value:: >>> c[0] 'a' >>> c[1] 'a' >>> c[2] 'b' You can also index from the end of the array with negative indices:: >>> c[-1] 'b' >>> c[-2] 'c' Set a value:: >>> c[0] = "c" >>> c Categorical([c, a, b, a, c, c, b]) Length: 7 FastArray([3, 1, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 The value must be already represented in the existing categories array (adding categories using ``auto_add_categories`` isn't working correctly at the time of this writing):: >>> try: ... c[0] = "d" ... except ValueError as e: ... print("ValueError:", e) ValueError: Cannot automatically add categories [b'd'] while auto_add_categories is set to False. Set flag to True in Categorical init. 
Multiple integers ----------------- >>> c Categorical([c, a, b, a, c, c, b]) Length: 7 FastArray([3, 1, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Pass a list of indices (a fancy index, which also specifies ordering). The returned Categorical is a copy of the original Categorical:: >>> c[[0, 2]] Categorical([c, b]) Length: 2 FastArray([3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c[[2, 0]] Categorical([b, c]) Length: 2 FastArray([2, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c[[-1, 1]] Categorical([b, a]) Length: 2 FastArray([2, 1], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Or pass an array:: >>> c[rt.arange(1, 3)] # Indices 1 and 2. Categorical([a, b]) Length: 2 FastArray([1, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Set values:: >>> c[[0, 2]] = "a" >>> c Categorical([a, a, a, a, c, c, b]) Length: 7 FastArray([1, 1, 1, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c[rt.arange(1, 3)] = "b" >>> c Categorical([a, b, b, a, c, c, b]) Length: 7 FastArray([1, 2, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Boolean mask array ------------------ >>> c Categorical([a, b, b, a, c, c, b]) Length: 7 FastArray([1, 2, 2, 1, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 The returned Categorical is a copy of the original Categorical:: >>> mask = rt.FA([False, True, True, True, True, True, False]) >>> c[mask] Categorical([a, b, a, c, c]) Length: 5 FastArray([1, 2, 1, 3, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Set values:: >>> c[mask] = "c" >>> c Categorical([a, c, c, c, c, c, b]) Length: 7 FastArray([1, 3, 3, 3, 3, 3, 2], 
dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Slice ----- >>> c Categorical([a, c, c, c, c, c, b]) Length: 7 FastArray([1, 3, 3, 3, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 The returned Categorical is a view of the original Categorical. Any changes to the view also modify the original (see below):: >>> c[:3] # Indices 0-2. Categorical([a, c, c]) Length: 3 FastArray([1, 3, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c[1:6] # Indices 1-5. Categorical([c, c, c, c, c]) Length: 5 FastArray([3, 3, 3, 3, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Set values:: >>> c[1:6] = "a" Categorical([a, a, a, a, a, a, b]) Length: 7 FastArray([1, 1, 1, 1, 1, 1, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 Slicing returns a view, not a copy. So if you set values in the returned subset, the original Categorical is modified:: >>> c2 = c[1:6] >>> c2 Categorical([a, a, a, a, a]) Length: 5 FastArray([1, 1, 1, 1, 1], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c2[1:5] = "c" # Modify the returned view. >>> c2 Categorical([a, c, c, c, c]) Length: 5 FastArray([1, 3, 3, 3, 3], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3 >>> c # The original is also modified. Categorical([a, a, c, c, c, c, b]) Length: 7 FastArray([1, 1, 3, 3, 3, 3, 2], dtype=int8) Base Index: 1 FastArray([b'a', b'b', b'c'], dtype='|S1') Unique count: 3
/riptable-1.13.0.tar.gz/riptable-1.13.0/docs/source/tutorial/categoricals_user_guide_indexing.rst
0.950949
0.697068
categoricals_user_guide_indexing.rst
pypi
# Adapted from https://github.com/pandas-dev/pandas/blob/main/scripts/validate_docstrings.py # Module walking inspired by https://github.com/pyvista/numpydoc-validation/blob/main/numpydoc_validation/_validate.py from __future__ import annotations import argparse import doctest import importlib import inspect import io import json import os import pathlib import re import subprocess import sys import tempfile import typing import warnings try: import tomllib except ModuleNotFoundError: import tomli as tomllib import matplotlib import matplotlib.pyplot as plt import numpy from numpydoc.docscrape import get_doc_object from numpydoc.validate import ( Validator, validate, ) import pandas import riptable # With template backend, matplotlib plots nothing matplotlib.use("template") # Standardize on these display settings when executing examples riptable.Display.options.COL_ALL = True # display all Dataset columns riptable.Display.options.E_MAX = 100_000_000 # render up to 100MM before using scientific notation riptable.Display.options.P_THRESHOLD = 0 # truncate small decimals, rather than scientific notation riptable.Display.options.NUMBER_SEPARATOR = True # put commas in numbers ERROR_MSGS = { "GL99": "Error parsing docstring: {doc_parse_error}", "GL98": "Private classes ({mentioned_private_classes}) should not be " "mentioned in public docstrings", "GL97": "Use 'array-like' rather than 'array_like' in docstrings.", "GL96": "Warning validating docstring: {doc_validation_warning}", "SA99": "{reference_name} in `See Also` section does not need `riptable` " "prefix, use {right_reference} instead.", "EX99": "Examples do not pass tests:\n{doctest_log}", "EX98": "flake8 error {error_code}: {error_message}{times_happening}", "EX97": "Do not import {imported_library}, as it is imported automatically for the examples", "EX96": "flake8 warning {error_code}: {error_message}{times_happening}", "EX95": "black format error:\n{error_message}", } OUT_FORMAT_OPTS = "default", "json", 
"actions" NAMES_FROM_OPTS = "module", "rst" IGNORE_VALIDATION = { # Styler methods are Jinja2 objects who's docstrings we don't own. # "Styler.env", # "Styler.template_html", # "Styler.template_html_style", # "Styler.template_html_table", # "Styler.template_latex", # "Styler.template_string", # "Styler.loader", } PRIVATE_CLASSES = [ # "NDFrame", # "IndexOpsMixin", ] IMPORT_CONTEXT = { "np": numpy, "pd": pandas, "rt": riptable, } def riptable_error(code, **kwargs): """ Copy of the numpydoc error function, since ERROR_MSGS can't be updated with our custom errors yet. """ return (code, ERROR_MSGS[code].format(**kwargs)) def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all riptable public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'riptable.FastArray.get_name). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """ current_module = "riptable" previous_line = current_section = current_subsection = "" position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set("-"): current_section = previous_line continue if set(line) == set("~"): current_subsection = previous_line continue if line.startswith(".. currentmodule::"): current_module = line.replace(".. currentmodule::", "").strip() continue if line == ".. 
autosummary::": position = "autosummary" continue if position == "autosummary": if line == "": position = "items" continue if position == "items": if line == "": position = None continue item = line.strip() if item in IGNORE_VALIDATION: continue func = importlib.import_module(current_module) for part in item.split("."): func = getattr(func, part) yield ( ".".join([current_module, item]), func, current_section, current_subsection, ) previous_line = line class RiptableDocstring(Validator): def __init__(self, func_name: str, doc_obj=None) -> None: self.func_name = func_name if doc_obj is None: doc_obj = get_doc_object(Validator._load_obj(func_name)) super().__init__(doc_obj) @property def name(self): return self.func_name @property def mentioned_private_classes(self): return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc] @property def examples_errors(self): flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL finder = doctest.DocTestFinder() runner = doctest.DocTestRunner(optionflags=flags) error_msgs = "" current_dir = set(os.listdir()) for test in finder.find(self.raw_doc, self.name, globs=IMPORT_CONTEXT): f = io.StringIO() runner.run(test, out=f.write) error_msgs += f.getvalue() leftovers = set(os.listdir()).difference(current_dir) if leftovers: for leftover in leftovers: path = pathlib.Path(leftover).resolve() if path.is_dir(): path.rmdir() elif path.is_file(): path.unlink(missing_ok=True) error_msgs += ( f"The following files were leftover from the doctest: " f"{leftovers}. 
Please use # doctest: +SKIP" ) return error_msgs @property def examples_source_code(self): lines = doctest.DocTestParser().get_examples(self.raw_doc) return [line.source for line in lines] def validate_pep8(self): if not self.examples: return # F401 is needed to not generate flake8 errors in examples # that do not use the imported context content = "" for k, v in IMPORT_CONTEXT.items(): content += f"import {v.__name__} as {k} # noqa: F401\n" content += "".join((*self.examples_source_code,)) error_messages = [] try: fd, fname = tempfile.mkstemp(prefix="val-", suffix=".py") file = os.fdopen(fd, mode="w", encoding="utf-8") file.write(content) file.close() cmd = ["python", "-m", "flake8", "--quiet", "--statistics", fname] response = subprocess.run(cmd, capture_output=True, text=True) if response.stderr: stderr = response.stderr.strip("\n") error_messages.append(f"1 ERROR {stderr}") stdout = response.stdout stdout = stdout.replace(fname, "") messages = stdout.strip("\n") if messages: error_messages.append(messages) finally: os.remove(fname) for error_message in error_messages: error_count, error_code, message = error_message.split(maxsplit=2) yield error_code, message, int(error_count) def validate_format(self): if not self.examples: return content = "".join((*self.examples_source_code,)) error_messages = [] try: fd, fname = tempfile.mkstemp(prefix="val-", suffix=".py") file = os.fdopen(fd, mode="w", encoding="utf-8") file.write(content) file.close() cmd = ["python", "-m", "black", "--quiet", "--diff", fname] response = subprocess.run(cmd, capture_output=True, text=True) if response.stderr: stderr = response.stderr.strip("\n") error_messages.append(stderr) stdout = response.stdout stdout = stdout.replace(fname, "<example>") messages = stdout.strip("\n") if messages: error_messages.append(messages) finally: os.remove(fname) for error_message in error_messages: yield error_message def non_hyphenated_array_like(self): return "array_like" in self.raw_doc def 
riptable_validate( func_name: str, errors: typing.Optional(list[str]) = None, not_errors: typing.Optional(list[str]) = None, flake8_errors: typing.Optional(list[str]) = None, flake8_not_errors: typing.Optional(list[str]) = None, ): """ Call the numpydoc validation, and add the errors specific to riptable. Parameters ---------- func_name : str Name of the object of the docstring to validate. Returns ------- dict Information about the docstring and the errors found. """ func_obj = Validator._load_obj(func_name) doc_parse_error = None with warnings.catch_warnings(record=True) as doc_warnings: try: doc_obj = get_doc_object(func_obj, doc=func_obj.__doc__) except ValueError as ex: doc_parse_error = str(ex) doc_obj = get_doc_object(func_obj, doc="") doc = RiptableDocstring(func_name, doc_obj) result = validate(doc_obj) if doc_parse_error: result["errors"].insert(0, riptable_error("GL99", doc_parse_error=doc_parse_error)) mentioned_errs = doc.mentioned_private_classes if mentioned_errs: result["errors"].append(riptable_error("GL98", mentioned_private_classes=", ".join(mentioned_errs))) if doc.see_also: for rel_name in doc.see_also: if rel_name.startswith("riptable."): result["errors"].append( riptable_error( "SA99", reference_name=rel_name, right_reference=rel_name[len("riptable.") :], ) ) def matches(test: str, matches: list[str]): for match in matches: if test.startswith(match): return True return False result["examples_errs"] = "" if doc.examples: result["examples_errs"] = doc.examples_errors if result["examples_errs"]: result["errors"].append(riptable_error("EX99", doctest_log=result["examples_errs"])) for error_code, error_message, error_count in doc.validate_pep8(): times_happening = f" ({error_count} times)" if error_count > 1 else "" result["errors"].append( riptable_error( "EX98" if error_code == "ERROR" or ( flake8_errors and matches(error_code, flake8_errors) and (not flake8_not_errors or not matches(error_code, flake8_not_errors)) ) else "EX96", 
error_code=error_code, error_message=error_message, times_happening=times_happening, ) ) examples_source_code = "".join(doc.examples_source_code) for wrong_import in [v.__name__ for v in IMPORT_CONTEXT.values()]: if re.search(f"import {wrong_import}\W+", examples_source_code): result["errors"].append(riptable_error("EX97", imported_library=wrong_import)) for error_message in doc.validate_format(): result["errors"].append( riptable_error( "EX95", error_message=error_message, ) ) if doc.non_hyphenated_array_like(): result["errors"].append(riptable_error("GL97")) plt.close("all") if doc_warnings: for doc_warning in doc_warnings: doc_validation_warning = warnings.formatwarning( doc_warning.message, doc_warning.category, doc_warning.filename, doc_warning.lineno ).strip() result["errors"].append(riptable_error("GL96", doc_validation_warning=doc_validation_warning)) if errors or not_errors: filtered_errors = [] for err_code, err_desc in result["errors"]: if not (errors and not matches(err_code, errors) or not_errors and matches(err_code, not_errors)): filtered_errors.append((err_code, err_desc)) result["errors"] = filtered_errors return result def get_all_objects(root: object) -> set[object]: objs = set() for name, obj in inspect.getmembers(root): # ignore any private names if name.startswith("_"): continue # ignore if obj is not class or routine if not inspect.isclass(obj) and not inspect.isroutine(obj): continue objs.add(obj) if inspect.isclass(obj): objs |= get_all_objects(obj) return objs def get_module_items(modulename) -> list[str]: module = importlib.import_module(modulename) items = [] for obj in get_all_objects(module): # extract the obj full name, ignoring anything not named try: fullname = obj.__module__ + "." 
+ obj.__qualname__ except (AttributeError, TypeError): continue # ignore any objects not part of this module if not modulename in fullname: continue items.append((fullname, obj, None, None)) return items def validate_all( match: str, not_match: str = None, names_from: str = NAMES_FROM_OPTS[0], errors: typing.Optional(list[str]) = None, not_errors: typing.Optional(list[str]) = None, flake8_errors: typing.Optional(list[str]) = None, flake8_not_errors: typing.Optional(list[str]) = None, ignore_deprecated: bool = False, verbose: int = 0, ) -> dict: """ Execute the validation of all docstrings, and return a dict with the results. Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information. """ result = {} seen = {} api_items = [] if names_from == "rst": base_path = pathlib.Path(__file__).parent.parent api_doc_fnames = pathlib.Path(base_path, "doc", "source", "reference") for api_doc_fname in api_doc_fnames.glob("*.rst"): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) else: api_items.extend(get_module_items("riptable")) api_items.sort(key=lambda v: v[0]) match_re = re.compile(match) if match else None not_match_re = re.compile(not_match) if not_match else None for func_name, _, section, subsection in api_items: if match_re and not match_re.search(func_name) or not_match_re and not_match_re.search(func_name): if verbose > 1: print(f"Ignoring {func_name} not matching prefix {match}") continue if verbose: print(f"Validating {func_name}... 
", end="", flush=True) doc_info = riptable_validate( func_name, errors=errors, not_errors=not_errors, flake8_errors=flake8_errors, flake8_not_errors=flake8_not_errors, ) if ignore_deprecated and doc_info["deprecated"]: if verbose > 1: print(f"Ignoring deprecated {func_name}") continue if verbose: status = "FAILED" if len(doc_info["errors"]) else "OK" print(status) result[func_name] = doc_info shared_code_key = doc_info["file"], doc_info["file_line"] shared_code = seen.get(shared_code_key, "") result[func_name].update( { "in_api": True, "section": section, "subsection": subsection, "shared_code_with": shared_code, } ) seen[shared_code_key] = func_name return result def print_validate_all_results( match: str, not_match: str = None, names_from: str = NAMES_FROM_OPTS[0], errors: typing.Optional(list[str]) = None, not_errors: typing.Optional(list[str]) = None, flake8_errors: typing.Optional(list[str]) = None, flake8_not_errors: typing.Optional(list[str]) = None, out_format: str = OUT_FORMAT_OPTS[0], ignore_deprecated: bool = False, outfile: typing.IO = sys.stdout, verbose: int = 0, ): if out_format not in OUT_FORMAT_OPTS: raise ValueError(f'Unknown output_format "{out_format}"') result = validate_all( match, not_match=not_match, names_from=names_from, errors=errors, not_errors=not_errors, flake8_errors=flake8_errors, flake8_not_errors=flake8_not_errors, ignore_deprecated=ignore_deprecated, verbose=verbose, ) if verbose: print("Results:") if out_format == "json": json.dump(result, outfile, indent=2) else: prefix = "##[error]" if out_format == "actions" else "" for name, res in result.items(): for err_code, err_desc in res["errors"]: outfile.write(f'{prefix}{res["file"]}:{res["file_line"]}: {name}: {err_code}: {err_desc}\n') exit_status = 0 for name, res in result.items(): if len(res["errors"]): exit_status = 1 break if verbose: print("Validation " + ("OK!" 
if exit_status == 0 else "FAILED!")) return exit_status def print_validate_one_results( func_name: str, errors: typing.Optional(list[str]) = None, not_errors: typing.Optional(list[str]) = None, flake8_errors: typing.Optional(list[str]) = None, flake8_not_errors: typing.Optional(list[str]) = None, outfile: typing.IO = sys.stdout, verbose: int = 0, ): def header(title, width=80, char="#"): full_line = char * width side_len = (width - len(title) - 2) // 2 adj = "" if len(title) % 2 == 0 else " " title_line = f"{char * side_len} {title}{adj} {char * side_len}" return f"\n{full_line}\n{title_line}\n{full_line}\n\n" if verbose: print(f"Validating {func_name}... ", end="", flush=True) result = riptable_validate( func_name, errors=errors, not_errors=not_errors, flake8_errors=flake8_errors, flake8_not_errors=flake8_not_errors, ) if verbose: status = "FAILED" if len(result["errors"]) else "OK" print(status) outfile.write(header(f"Docstring ({func_name})")) outfile.write(f"{result['docstring']}\n") outfile.write(header("Validation")) if result["errors"]: outfile.write(f'{len(result["errors"])} Errors found:\n') for err_code, err_desc in result["errors"]: if err_code == "EX99": # Failing examples are printed at the end outfile.write(f"\t{err_code}: Examples do not pass tests\n") continue outfile.write(f"\t{err_code}: {err_desc}\n") else: outfile.write(f'Docstring for "{func_name}" correct. :)\n') if result["examples_errs"]: outfile.write(header("Doctests")) outfile.write(result["examples_errs"]) def find_parent_dir_containing(filename: str) -> typing.Optional[str]: cur_dir = os.getcwd() while not os.path.exists(os.path.join(cur_dir, filename)): cur_dir = os.path.dirname(cur_dir) if not cur_dir: return None return cur_dir def find_pyproject_toml() -> typing.Optional[str]: pyproj_toml_filename = "pyproject.toml" root_dir = find_parent_dir_containing(pyproj_toml_filename) return os.path.join(root_dir, pyproj_toml_filename) if root_dir else None def main(): """ Main entry point. 
Call the validation for one or for all docstrings. """ argparser = argparse.ArgumentParser(description="Validate riptable docstrings") argparser.add_argument( "function", nargs="?", default=None, help="Function or method to validate (e.g. riptable.FastArray.get_name) " "if not provided, all docstrings are validated and returned " "as JSON.", ) argparser.add_argument( "--names-from", default=NAMES_FROM_OPTS[0], choices=NAMES_FROM_OPTS, help=f"Source of names when searching for all docstrings. It can be one of {str(NAMES_FROM_OPTS)[1:-1]} (default: '%(default)s').", ) argparser.add_argument( "--format", default=OUT_FORMAT_OPTS[0], choices=OUT_FORMAT_OPTS, help="format of the output when validating " "multiple docstrings (ignored when validating one). " f"It can be one of {str(OUT_FORMAT_OPTS)[1:-1]} (default: '%(default)s').", ) argparser.add_argument( "--match", default=None, help="Regex pattern for matching " "docstring names, in order to decide which ones " 'will be validated. The match "FastArray" ' "will make the script validate all the docstrings " 'of methods containing "FastArray". It is ' "ignored if function option is provided.", ) argparser.add_argument( "--not-match", default=None, help="Regex pattern for not matching " "docstring names, in order to decide which ones " 'will be validated. The not-match "mapping" ' "will make the script validate all the docstrings " 'of methods not containing "mapping". ' "Any matches are performed first, then any not-matches are excluded. " "It is ignored if function option is provided.", ) argparser.add_argument( "--errors", default=None, help="Comma separated " "list of error codes to validate. By default it " "validates all errors. Ignored when validating " "a single docstring.", ) argparser.add_argument( "--not-errors", default=None, help="Comma separated " "list of error codes not to validate. Empty by default. 
" "Ignored when validating a single docstring.", ) argparser.add_argument( "--ignore-deprecated", default=False, action="store_true", help="If this flag is set, deprecated objects are ignored when validating all docstrings.", ) argparser.add_argument( "--flake8-errors", default=None, help="Comma separated list of flake8 error codes to treat as errors. Others are treated as warnings.", ) argparser.add_argument( "--flake8-not-errors", default=None, help="Comma separated list of flake8 error codes not to validate. Empty by default", ) argparser.add_argument( "--example-prelude", default=None, help="Optional code to execute before the examples.", ) argparser.add_argument( "--out", "-o", default=None, type=str, help="Output file path, else use stdout.", ) argparser.add_argument( "--verbose", "-v", default=0, action="count", help="Emit verbose progress output. Specify multiple times for more verbosity.", ) args = argparse.Namespace() pyproj_toml_path = find_pyproject_toml() if pyproj_toml_path: with open(pyproj_toml_path, "rb") as f: pyproj_toml = tomllib.load(f) config = pyproj_toml.get("tool", {}).get("validate_docstrings", {}) for k, v in config.items(): setattr(args, k, v) argparser.parse_args(namespace=args) with open(args.out, "w", encoding="utf-8", errors="backslashreplace") if args.out else open( sys.stdout.fileno(), "w", closefd=False ) as outfile: errors = args.errors.split(",") if args.errors is not None else None not_errors = args.not_errors.split(",") if args.not_errors is not None else None flake8_errors = args.flake8_errors.split(",") if args.flake8_errors is not None else None flake8_not_errors = args.flake8_not_errors.split(",") if args.flake8_not_errors is not None else None if args.function is None: return print_validate_all_results( match=args.match, not_match=args.not_match, names_from=args.names_from, errors=errors, not_errors=not_errors, flake8_errors=flake8_errors, flake8_not_errors=flake8_not_errors, out_format=args.format, 
ignore_deprecated=args.ignore_deprecated, outfile=outfile, verbose=args.verbose, ) else: print_validate_one_results( args.function, errors=errors, not_errors=not_errors, flake8_errors=flake8_errors, flake8_not_errors=flake8_not_errors, outfile=outfile, verbose=args.verbose, ) return 0 if __name__ == "__main__": sys.exit(main())
/riptable-1.13.0.tar.gz/riptable-1.13.0/dev_tools/validate_docstrings.py
0.670069
0.349783
validate_docstrings.py
pypi
import json
from datetime import datetime
from typing import Any, List

from riptide.cloud.exception import RiptideClientException

_ROUTE = "api/history"
_MAX_IDENTIFIERS = 50
_MAX_TIME_DIFF = 31  # days


class RiptideHistoryApp:
    """
    Access point for the history API, covering four flavours of data:

    * Raw - point values exactly as recorded at the site.
    * Interpolated - raw values plus estimated values for missing
      periods, obtained by re-sampling.
    * Rolled-up - values condensed over a time period (e.g. 1 day,
      10 days, 1 week and so on).
    * Digested - values summarized over a period of time (e.g. mean,
      sum, median and so on), optionally applying a function to every
      value of a point (add, subtract etc.).
    """

    def __init__(self, client):
        self._client = client

    def get_history(self, identifiers: List[str], start: datetime,
                    end: datetime,
                    # Optional: Raw Historic Data.
                    reverse: bool = None, limit: int = None,
                    output_tz: str = None, response_format: str = None,
                    align_data: bool = None,
                    # Optional: Interpolated Historic Data.
                    interpolate: bool = None, period: int = None,
                    # Optional: Rolled-up Historic Data.
                    rollup: bool = None, freq: str = None, how: str = None,
                    # Optional: Digested Historic Data.
                    func: Any = None) -> dict:
        """
        Retrieve history data for up to 50 points over at most 31 days.

        :param identifiers: list of one-or-more entity property URIs.
        :param start: datetime object (inclusive lower bound).
        :param end: datetime object (inclusive upper bound).

        Raw-data options:

        :param reverse: False returns the top `limit` rows, True the
            bottom `limit` rows. Default is False.
        :param limit: maximum number of rows to return; None means no
            upper limit.
        :param output_tz: timezone to convert the output to; None means
            UTC.
        :param response_format: one of "default", "ordered_history",
            "compact_ordered_history", "ordered_list".
        :param align_data: sort the underlying DataFrame on its index.
            Default is False.

        Interpolation options (used only when `interpolate` is True):

        :param interpolate: whether the data should be re-sampled; if
            False, raw records are returned. Default is False.
        :param period: frequency at which to generate samples.

        Rollup options (used only when `rollup` is True):

        :param rollup: True provides rolled-up historic data. Default is
            False.
        :param freq: rollup frequency alias - one of B, D, W, M, BM, MS,
            BMS, Q, BQ, QS, BQS, A, BA, AS, BAS (calendar/business
            day/week/month/quarter/year variants), or H, T, S, L, U for
            hourly down to microseconds.
        :param how: down/re-sampling method: "sum", "mean", "std",
            "max", "min", "median", "first", "last", "ohlc".

        Digest options:

        :param func: history digest function.

        :return: History data in JSON format.
        :raises RiptideClientException: on too many identifiers,
            start > end, or a window longer than 31 days.
        """
        # Client-side request validation before touching the network.
        if len(identifiers) > _MAX_IDENTIFIERS:
            raise RiptideClientException(
                "Length of identifiers must not exceed {}; "
                "Received {}".format(_MAX_IDENTIFIERS, len(identifiers))
            )
        if start > end:
            raise RiptideClientException(
                "start must not be greater than end; start={}, "
                "end={}".format(start, end)
            )
        if (end - start).days > _MAX_TIME_DIFF:
            raise RiptideClientException(
                "You cannot retrieve more than {} days of historical "
                "data.".format(_MAX_TIME_DIFF)
            )

        # Mandatory parameters; sub-second precision is dropped.
        params = {
            "uris": json.dumps(identifiers),
            "start": start.replace(microsecond=0),
            "end": end.replace(microsecond=0),
        }
        # Optional parameters are forwarded only when explicitly set.
        optional = {
            "reverse": reverse,
            "limit": limit,
            "output_tz": output_tz,
            "response_format": response_format,
            "align_data": align_data,
            "interpolate": interpolate,
            "period": period,
            "rollup": rollup,
            "freq": freq,
            "how": how,
            "func": func,
        }
        params.update({k: v for k, v in optional.items() if v is not None})
        return self._client.rc_get(uri=_ROUTE, params=params)


# __END__
/riptide-cloud-0.0.2.tar.gz/riptide-cloud-0.0.2/riptide/cloud/history.py
0.876105
0.603406
history.py
pypi
import json
from typing import List

_ROUTE = "api/watch"


class RiptideWatchApp:
    """Client-side wrapper around the watch API endpoints."""

    def __init__(self, client):
        self._client = client

    def create_watch(self, identifiers: List[str]) -> str:
        """
        Register a watch covering the list of interested points.

        :param identifiers: list of entity property uri's.
        :return: watch id
        """
        payload = json.dumps(identifiers)
        return self._client.rc_put(uri=_ROUTE, data=payload)

    def get_watches(self) -> List[str]:
        """
        List the ids of every active watch.

        :return: list of one-or-more watch id's.
        """
        return self._client.rc_get(uri=_ROUTE)

    def poll_watch(self, watch_id: str) -> List[dict]:
        """
        Retrieve info for the given Watch.

        :param watch_id: watch id.
        :return: watch info.
        """
        return self._client.rc_get(uri=f"{_ROUTE}/{watch_id}")

    # TODO: testing (Not working - 500)
    def poll_watch_changed(self, watch_id: str) -> List[dict]:
        """
        Retrieve only the current values that have changed since the last
        time the Watch was polled.

        :param watch_id: watch id.
        :return: watch record for the changed points.
        """
        return self._client.rc_get(uri=f"{_ROUTE}/{watch_id}?cov=true")

    def delete_watch(self, watch_id: str) -> dict:
        """
        Delete watch.

        :param watch_id: watch id to delete
        :return: nothing.
        """
        return self._client.rc_delete(uri=f"{_ROUTE}/{watch_id}")

    def read_present_value(self, identifier: str) -> dict:
        """
        Get present value of an entity property.

        :param identifier: entity property uri.
        :return: Current watch record for the given entity property
        """
        return self._client.rc_get(uri=f"{_ROUTE}/presentValue/{identifier}")


# __END__
/riptide-cloud-0.0.2.tar.gz/riptide-cloud-0.0.2/riptide/cloud/watch.py
0.587825
0.250502
watch.py
pypi
from datetime import datetime
from typing import List

from riptide.cloud.exception import RiptideClientException

_ROUTE = "api/alarms"


class RiptideAlarmApp:
    """Client-side wrapper around the alarms API endpoints."""

    def __init__(self, client):
        self._client = client
        # TODO: this should be typing.Literal
        self._context = ["site_uuids", "entity_uuids", "point_uuids", "uuids"]

    def _validate_context(self, context: str) -> None:
        """Raise RiptideClientException if `context` is not a supported name."""
        if context not in self._context:
            raise RiptideClientException(
                "context must be one of the following - "
                "{}".format(self._context)
            )

    def get_alarm(self, uuids: List[str], context: str) -> List[dict]:
        """
        Get alarm information.

        :param uuids: list of uuids.
        :param context: valid values are "site_uuids", "entity_uuids",
            "point_uuids", "uuids" (alarm uuids).
        :return: JSON response.
        :raises RiptideClientException: if `context` is not valid.
        """
        self._validate_context(context)
        return self._client.rc_get(
            uri="{}?{}={}".format(_ROUTE, context, ",".join(uuids))
        )

    def get_alarm_history(self,
                          uuids: List[str],
                          context: str,
                          start: datetime = None,
                          end: datetime = None) -> List[dict]:
        """
        Retrieve alarms historical data.

        :param uuids: list of uuids.
        :param context: valid values are "site_uuids", "entity_uuids",
            "point_uuids", "uuids" (alarm uuids).
        :param start: datetime object.
        :param end: datetime object.
        :return: JSON response.
        :raises RiptideClientException: if `context` is not valid.
        """
        # Validate before building the request URI (previously the URI was
        # built first, doing needless work for invalid contexts).
        self._validate_context(context)
        uri = "{}/history?{}={}".format(_ROUTE, context, ",".join(uuids))
        if start:
            uri = "{}&start={}".format(uri, start)
        if end:
            uri = "{}&end={}".format(uri, end)
        return self._client.rc_get(uri=uri)


# __END__
/riptide-cloud-0.0.2.tar.gz/riptide-cloud-0.0.2/riptide/cloud/alarms.py
0.415847
0.150153
alarms.py
pypi
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## 0.2.4 - 2022-02-03

This version fixes a bug where all the functions in the python bindings to the C++ code (exposed in `riptide.libcpp` on the Python side) assumed that their input numpy arrays are contiguous in memory. In practice, passing a column slice of a two-dimensional `float32` array to `fast_running_median` was found to produce incorrect results: rather than reading `data[:, col_index]`, the code read `data[0, col_index:col_index+num_cols]`. The issue would only trigger on the `float32` type, because otherwise an implicit copy (contiguous in memory) of the input was created. Thanks to Akshay Suresh for finding the bug and reporting it.

### Fixed

- All C++ functions in `python_bindings.cpp` now explicitly check that all input arrays are contiguous in memory, and throw `std::invalid_argument` otherwise (maps to `ValueError` in Python).
- The size equality check of the input arrays for the `rollback` function in `python_bindings.cpp` is now correct.

### Added

- `running_median` function that wraps its counterpart in the C++ library. It ensures that the array passed to it is contiguous in memory, and if not, makes a temporary contiguous copy before doing so.
- Unit test that checks the correctness of `running_median` on non-contiguous data slices.
- The `riptide` module now exposes the functions `running_median` and `fast_running_median`. Documented how boundary conditions are handled in the running median calculation. Added entries for both functions in the documentation, in the Kernel Functions section.

### Changed

- Renamed the file `running_median.py` to `running_medians.py` to avoid name collisions.
## 0.2.3 - 2021-08-01 ### Updated - Packaging is now PEP 517/518 compliant - Using `setuptools-scm` instead of `versioneer` (which is not maintained anymore) for automatic versioning ## 0.2.2 - 2021-07-24 ### Added - Added full documentation and readthedocs integration. The docs can be found [here](https://riptide-ffa.readthedocs.io). - Fixed typos in docstrings, extra docstrings for `Periodogram` class ### Updated - Base Dockerfile on python:3.8, ensure all tests pass during the build ### Fixed - It is now possible to specify period ranges so that the data are searched at their raw resolution (no downsampling). This corresponds to choosing period range parameters such that `period_min = bins_min x tsamp`. In this case, the code would previously raise an error saying that it could not downsample the data with a factor equal to 1, which was not the intended behaviour. - Fixed an issue where the C++ functions `check_trial_widths` and `check_stdnoise` would systematically throw an exception regardless of their input arguments. The issue was occurring with `gcc 8.3.0` on some systems, and appears to have been caused by said functions having a return type `bool` while they actually did not return anything. The functions have `void` return type now. ## 0.2.1 - 2020-10-27 ### Added - Limited support for X-ray and Gamma time series in PRESTO inf/dat format. `TimeSeries.from_presto_inf()` will now correctly read these. However, a warning will be issued when doing so, because `riptide` is currently designed to process data where the background noise is Gaussian. **Be aware that processing high-energy data may produce junk results.** ### Fixed - Docstring of `TimeSeries.generate()` was missing the `period` parameter ## 0.2.0 - 2020-08-10 **This release contains significant improvements and additions but breaks compatibility with** `v0.1.x`. 
### Changed - The `ffa_search()` function now only returns two values: the de-reddened `TimeSeries` that was actually searched, and a `Periodogram` object. The `ProcessingPlan` class has been removed. - Clean rewrite of all kernels in C++. Python bindings for said kernels are now generated with `pybind11` which requires a lot less boilerplate code. Kernel functions can imported in python from the `riptide.libcpp` submodule. - The python function `get_snr()` has been renamed `boxcar_snr()` - Moved unit tests to `riptide/tests` so they can be packaged with the rest of the module ### Added - The `Periodogram` returned by `ffa_search` now has a `foldbins` member, which is the number of phase bins that were used when folding the data for a particular trial period. Using this information, the pipeline now returns the *true* duty cycle of the candidates; it was previously returning only an estimate equal to `width / bins_avg` where `width` was the best pulse width in bins, and `bins_avg = (bins_min + bins_max) / 2`. - `running_median()` function in C++, it is around 8x faster than its former python counterpart. - Tests for `running_median()` - Tests for `boxcar_snr()` - DM trial selection by the pipeline is now considerably faster - The pipeline now infers the observing min/max frequencies and the number of channels from the input data if possible. It is now recommended to leave the fields `fmin`, `fmax`, and `nchans` blank in the pipeline configuration file. It is necessary to specify these values only when the dedispersed time series data format (e.g. SIGPROC) does not contain that information. - In the pipeline configuration file, the minimum and maximum trial DM fields can also be left blank. If so, the minimum and/or maximum DMs of the search are determined by the DM trials available to process. - Added careful validation of the pipeline configuration file (using the `schema` library). 
The pipeline will raise an exception early in the processing if the configuration is invalid, with a helpful error message. - Can now run the unit test suite from python code or IPython by calling `riptide.test()` ### Removed - Removed `ProcessingPlan` class. It used to be passed to the old C function that was computing the periodogram. Its job is now directly performed by the new C++ function `periodogram()`. - Removed old C kernels ## 0.1.5 - 2020-05-23 ### Changed - `find_peaks()` now always returns peaks in decreasing S/N order ### Added - `rseek` command-line application that searches a single dedispersed time series and prints a list of significant peaks found ## 0.1.4 - 2020-05-03 This version reduces RAM usage significantly. Trial periods are now stored as double precision floats. ### Fixed - Trial periods are now stored as *double* precision floats (`double` in C, `float` in python). When searching very long time series (several hours) and short trial periods, single precision floats were inaccurate enough that small groups of consecutive period trials erroneously ended up having the exact same value. Incorrect detection periods would propagate down the pipeline, and `Candidate` objects could end up being folded with a period slightly offset from the true value. ### Changed - The pipeline worker sub-processes now get passed a *file path* to a time series as an input, rather than a full `TimeSeries` object, which saves the cost of communicating a lot of data between processes - The buffers used for downsampling and FFA transforming the data when calculating periodograms are now given the smallest possible size. They only need to hold `N / f` data points, where `N` is the number of samples in the raw time series, and `f` the first downsampling factor in the search plan. Buffers were previously given a size of `N` which was often wasteful. 
### Added - Added option to save pipeline logs to file ## 0.1.3 - 2020-04-27 ### Fixed - Fixed a rare but serious bug in the C function downsample() where the code would attempt to read one element beyond the end of the input array. This would happen only when the downsampling factor was such that `nsamp_input / dsfactor` was exactly an integer. This would cause the last sample in the output to have an absurd value, or a segmentation fault. - Fixed a crash in the pipeline that would occur when significant peak clusters were found, but none had a S/N that would exceed the `candidate_filters.snr_min` configuration parameter. Turning the resulting empty list of clusters into a CSV would then fail. - Fixed a related problem where clusters whose S/N was below `candidate_filters.snr_min` were not saved to the CSV file, which was not the intended behaviour. ### Added - Unit test suite, to be improved and expanded - Travis CI - Codecov integration ## 0.1.2 - 2020-04-15 ### Fixed - `Metadata` is now correctly carrying the "tobs" attribute for TimeSeries loaded from SIGPROC data. This was causing a cryptic error when processing SIGPROC dedispersed time series with the pipeline. ### Added - Can now read and process 8-bit SIGPROC data - Can now read SIGPROC header keys of boolean type. In particular the "signed" key (which defines the signedness of 8-bit SIGPROC data) is now supported by default. ## 0.1.1 - 2020-04-08 ### Fixed - Module should now properly install on OSX, where some C compilation options had to be adapted. `numpy.ctypeslib` also expects shared libraries to have a `.dylib` extension on OSX rather than the linux standard `.so` ## 0.1.0 - 2020-04-08 **This release contains major improvements and additions but breaks compatibility with** `v0.0.x`. If you have any ongoing large-scale searches or projects, stick with the older release you were using. Other users should **definitely** use this new version. 
### Fixed - Ensure that each subprocess spawned by the pipeline consumes only one CPU, as initially intended to achieve optimal CPU usage. In previous versions, some `numpy` functions would attempt to run on all CPUs at once which was detrimental. As a result the pipeline is now considerably faster. - The impact of downsampling by a non-integer factor on the noise variance is now correctly dealt with when normalizing the output S/N values. Refer to the paper for details. The difference should be negligible, except when downsampling the original input data by factors close to 1. - The Makefile used to build the C sources does not explicitly set `CC=gcc` anymore. The default system compiler will now be used instead. `gcc` will be used by default if the environment variable `CC` is undefined. ### Changed - Clean rewrite of peak detection algorithm. It uses the same method as before, but the arguments of the `find_peaks()` function have changed. See docstring. - Now using JSON instead of HDF5 as the data product file format. This is vastly easier to maintain and future-proof. Read/write speed and file sizes remain similar. - Clean rewrite of the pipeline, which has been improved and runs faster, see below for all related changes. 
- Improved DM trial selection, now uses a method similar to PRESTO's `DDPlan` to achieve the least expensive DM space coverage - Input de-reddening and normalisation is now common to all search period sub-ranges, further improving pipeline run time - Harmonic flagging is now always performed - Removing harmonics from the output candidate list is now optional - Added option to produce all candidate plots at the end of the pipeline run - Candidate plots look nicer - Saving candidate files and plots is done with multiple CPUs and runs faster - The pipeline configuration file keywords / schema has been adjusted to match all pipeline changes, see example provided and documentation - `Candidate` class has been refactored, its attribute and method names have changed - Changed name of high level FFA transforms to `ffa1()` and `ffa2()`. - Updated signal generation functions so that the 'amplitude' parameter now represents the expected true signal-to-noise ratio ### Added - `TimeSeries` now has a `fold()` method that returns a numpy array representing either sub-integrations or a folded profile - Timing of all pipeline stages - Dynamic versioning using the `versioneer` module. In python, the current version is accessible via `riptide.__version__` - Added `ffafreq()` and `ffaprd()` to generate list of FFA transform trial freqs / periods. ### Removed - Removed the `SubIntegrations` class which added unneeded complexity, sub-integrations are now represented as a 2D numpy array ## 0.0.3 - 2019-11-30 ### Added - Full docstring for `ffa_search()` ### Changed - Cleaner and faster FFA C kernels, which have been moved to separate files - Slight optimisation to S/N calculation, where only the best value across all pulse phases is normalized, instead of normalizing at every phase trial - S/N calculation separated into smaller functions - Removed OpenMP multithreading from S/N calculation, it was slower in most usual cases. The benefits were visible only for very large input data. 
As a result, `ffa_search()` does not accept the 'threads' keyword anymore, and the 'threads' keyword has also been removed from the pipeline configuration files (in the 'search' section). Parallelism only happens at the TimeSeries level, i.e. one process per TimeSeries. ### Fixed - Reinstated the `--fast-math` compilation option that had been accidentally removed in v0.0.2. The code is now much faster. ## 0.0.2 - 2019-11-05 ### Added - `riptide` is now properly packaged with a `setup.py` script. Updated installation instructions in `README.md` - Updated Dockerfile. Build docker image with `make docker` command. ### Fixed - When normalising TimeSeries, use a float64 accumulator when calculating mean and variance. This avoid saturation issues on data with high values, e.g. from 8-bit Parkes UWL observations. ### Changed - Improved candidate plots: docstring for `Candidate` class, DM unit on plots, option to subtract the baseline value of the profile before displaying it, option to plot the profile as either a bar or line chart. ## 0.0.1 - 2018-10-25 ### Added - First stable release of riptide. This is the version that will be run on the LOTAAS survey.
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/CHANGELOG.md
0.91666
0.799403
CHANGELOG.md
pypi
import numpy as np
from riptide.libffa import downsample


def downsample_vertical(X, factor):
    """
    Downsample a 2D array along its first axis (i.e. its lines) by the
    given factor, which may be non-integer.
    """
    nlines, __ = X.shape
    # NOTE: comparisons written in the negated form on purpose, so that
    # pathological (NaN) factors are rejected as well.
    if not factor > 1:
        raise ValueError("factor must be > 1")
    if not factor < nlines:
        raise ValueError("factor must be strictly smaller than the number of input lines")
    columns = np.ascontiguousarray(X.T)
    reduced = np.asarray([downsample(col, factor) for col in columns])
    return np.ascontiguousarray(reduced.T)


def fold(ts, period, bins, subints=None):
    """
    Fold TimeSeries at given period

    Parameters
    ----------
    ts : TimeSeries
        Input time series to fold
    period : float
        Period in seconds
    bins : int
        Number of phase bins
    subints : int or None, optional
        Number of desired sub-integrations. If None, the number of
        sub-integrations will be the number of full periods that fit in
        the data (default: None)

    Returns
    -------
    folded : ndarray
        The folded data as a numpy array. If subints > 1, it has a shape
        (subints, bins). Otherwise it is a 1D array with 'bins' elements.

    Raises
    ------
    ValueError: if the data cannot be folded with the requested
        parameters, e.g. bin width is shorter than sampling time, or
        subint length is shorter than requested period
    """
    if period > ts.length:
        raise ValueError("Period exceeds data length")
    tbin = period / bins
    if not tbin > ts.tsamp:
        raise ValueError("Bin width is shorter than sampling time")

    if subints is not None:
        subints = int(subints)
        if not subints >= 1:
            raise ValueError("subints must be >= 1 or None")
        full_periods = ts.length / period
        if subints > full_periods:
            raise ValueError(f"subints ({subints}) exceeds the number of signal periods that fit in the data ({full_periods})")

    # Downsample so that one output sample corresponds to one phase bin,
    # then cut to a whole number of periods and fold.
    factor = tbin / ts.tsamp
    tsdown = ts.downsample(factor)
    nper = tsdown.nsamp // bins
    usable = nper * bins
    folded = tsdown.data[:usable].reshape(nper, bins)
    # Normalisation keeps the noise standard deviation consistent
    folded *= (nper * factor) ** -0.5

    if subints == 1 or nper == 1:
        return folded.sum(axis=0)
    if subints is None or subints == nper:
        return folded
    # Condense the period rows down to the requested sub-integrations
    return downsample_vertical(folded, nper / subints)
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/folding.py
0.900193
0.528777
folding.py
pypi
import numpy as np ### Local module imports import riptide.libcpp as libcpp from .ffautils import generate_width_trials from .periodogram import Periodogram from .timing import timing @timing def ffa_search(tseries, period_min=1.0, period_max=30.0, fpmin=8, bins_min=240, bins_max=260, ducy_max=0.20, wtsp=1.5, deredden=True, rmed_width=4.0, rmed_minpts=101, already_normalised=False): """ Run a FFA search of a single TimeSeries object, producing its periodogram. Parameters ---------- tseries : TimeSeries The time series object to search period_min : float Minimum period to search in seconds period_max : float Maximum period to search in seconds fpmin : int Minimum number of signal periods that must fit in the data. In other words, place a cap on period_max equal to DATA_LENGTH / fpmin bins_min : int Minimum number of phase bins in the folded data. Higher values provide better duty cycle resolution. As the code searches longer trial periods, the data are iteratively downsampled so that the number of phase bins remains between bins_min and bins_max bins_max : int Maximum number of phase bins in the folded data. Must be strictly larger than bins_min, approx. 10% larger is a good choice wtsp : float Multiplicative factor between consecutive boxcar width trials. The smallest width is always 1 phase bin, and the sequence of width trials is generated with the formula: W(n+1) = max( floor(wtsp x W(n)), W(n) + 1 ) wtsp = 1.5 gives the following sequence of width trials (in number of phase bins): 1, 2, 3, 4, 6, 9, 13, 19 ... ducy_max : float Maximum duty cycle to optimally search. Limits the maximum width of the boxcar matched filters applied to any given profile. 
Example: on a 300 phase bin profile, ducy_max = 0.2 means that no boxcar filter of width > 60 bins will be applied deredden : bool Subtract red noise from the time series before searching rmed_width : float The width of the running median filter to subtract from the input data before processing, in seconds rmed_minpts : int The running median is calculated of a time scrunched version of the input data to save time: rmed_minpts is the minimum number of scrunched samples that must fit in the running median window Lower values make the running median calculation less accurate but faster, due to allowing a higher scrunching factor already_normalised : bool Assume that the data are already normalised to zero mean and unit standard deviation Returns ------- ts : TimeSeries The de-reddened and normalised time series that was actually searched pgram : Periodogram The output of the search, which contains among other things a 2D array representing S/N as a function of trial period and trial width. """ ### Prepare data: deredden then normalise IN THAT ORDER if deredden: tseries = tseries.deredden(rmed_width, minpts=rmed_minpts) if not already_normalised: tseries = tseries.normalise() widths = generate_width_trials(bins_min, ducy_max=ducy_max, wtsp=wtsp) periods, foldbins, snrs = libcpp.periodogram( tseries.data, tseries.tsamp, widths, period_min, period_max, bins_min, bins_max ) pgram = Periodogram(widths, periods, foldbins, snrs, metadata=tseries.metadata) return tseries, pgram
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/search.py
0.913242
0.747639
search.py
pypi
import logging import typing from math import ceil import numpy as np from riptide.clustering import cluster1d from riptide.timing import timing log = logging.getLogger('riptide.peak_detection') class Peak(typing.NamedTuple): """ A simple NamedTuple with the essential parameters of a peak found in a Periodogram """ period: float freq: float width: int ducy: float # duty cycle iw: int # width trial index ip: int # period trial index snr: float dm: float def summary_dict(self): """ Returns a minimal dictionary of attributes to be written as CSV by the pipeline """ attrs = ('period', 'freq', 'dm', 'width', 'ducy', 'snr') return {a: getattr(self, a) for a in attrs} def segment_stats(f, s, T, segwidth=5.0): """ Cut a periodogram in consecutive, equal-sized segments with a frequency span equal to segwidth / T, and return the centre frequencies, median S/N and robust S/N standard deviation of all segments. This information is then used to fit a sensible peak selection threshold as a function of frequency. 
Parameters ----------- f : ndarray Frequencies in Hz S : ndarray Signal-to-noise ratios for a single width trial T : float Integration time in seconds segwidth : float Frequency segment width expressed in units of 1/T Returns ------- fc : ndarray Median frequency of the segments smed : ndarray Median S/N of the segments sstd : ndarray S/N standard deviation of the segments, measured from the interquartile range of the segment's S/N distribution (stddev = IQR / 1.349) """ w = segwidth / T #log.debug("Segment width (Hz): {:.6f}".format(w)) # NOTE: the spacing of frequency trials is almost constant m = ceil(abs(f[-1] - f[0]) / w) # number of segments #log.debug("Segments: {:d}".format(m)) p = len(f) // m # number of complete segments #log.debug("Points/segment: {:d}".format(p)) n = m * p # effective number of elements f = f[:n] s = s[:n] fc = np.median(f.reshape(m, p), axis=1) s25, smed, s75 = np.percentile(s.reshape(m, p), (25, 50, 75), axis=-1) sstd = (s75 - s25) / 1.349 return fc, smed, sstd def fit_threshold(fc, tc, polydeg=2): """ Fit a polynomial in log(f) to the selection threshold control points (fc, tc) Parameters ---------- fc : ndarray Frequency of the control points tc : ndarray Value of the selection threshold at the control frequencies polydeg : int Degree of the log(f) polynomial to fit Returns ------- poly : np.poly1d Polynomial in log(f) that represents the selection threshold as a function of frequency """ coeffs = np.polyfit(np.log(fc), tc, polydeg) return np.poly1d(coeffs) def find_peaks_single(f, s, T, smin=6.0, segwidth=5.0, nstd=7.0, minseg=10, polydeg=2, clrad=0.1): """ Find peaks in a single pulse width trial. 
Returns a list of array indices that correspond to peak centres """ peak_indices = [] # Control points fc, smed, sstd = segment_stats(f, s, T, segwidth=segwidth) sc = smed + nstd * sstd # Selection threshold: polynomial in log(f) if len(fc) >= minseg: poly = fit_threshold(fc, sc, polydeg=polydeg) polyco = poly.coefficients else: # constant threshold if not enough points for fit polyco = [smin] poly = np.poly1d(polyco) # Selected frequencies and frequency indices dynthr = poly(np.log(f)) mask = (s > dynthr) & (s > smin) indices = np.where(mask)[0] fsel = f[indices] clusters = cluster1d(fsel, clrad / T) for cl in clusters: ix = indices[cl] ipeak = s[ix].argmax() ipeak = ix[ipeak] peak_indices.append(ipeak) return peak_indices, polyco @timing def find_peaks(pgram, smin=6.0, segwidth=5.0, nstd=6.0, minseg=10, polydeg=2, clrad=0.1): """ Identify significant peaks in a periodogram using a dynamically fitted S/N selection threshold. The fitting involves the following procedure for each pulse width trial separately: 1. Cut the frequency range covered by the periodogram in segments of length 1 / T_obs 2. Get the median S/N 'm' and robust S/N standard deviation 's' of each segment. The dynamic selection threshold for that segment should be t = m + nstd x s 3. Fit a polynomial in log(f) to the control points (f_i, t_i) thus obtained 4. Any point whose S/N exceeds both the dynamic threshold and the value 'smin' are considered significant 5. Cluster these points. Two points are in the same peak if their trial frequencies are within clrad / T_obs of each other. All such clusters constitute a Peak. 
Parameters ---------- pgram : Periodogram Input periodogram to search for peaks smin : float, optional Minimum S/N that a peak must exceed, in addition to having to exceed the dynamic selection threshold segwidth : float, optional Width of a frequency segment in units of 1 / T_obs nstd : float, optional See above for an explanation minseg : float, optional Minimum number of segments below which only a static selection threshold is applied polydeg : float, optional Degree of polynomial in log(f) clrad : float, optional Clustering radius in frequency space, in units of 1 / T_obs Returns ------- peaks: list List of Peak objects polycos: dict Dictionary of polynomial coefficients {iw: polyco} where 'iw' is the width trial index, and 'polyco' a list of polynomial coefficients in log(f). These can be passed to np.poly1d """ f = pgram.freqs T = pgram.tobs dm = pgram.metadata['dm'] peaks = [] polycos = {} for iw, width in enumerate(pgram.widths): s = pgram.snrs[:, iw].astype(float) cur_peak_indices, cur_polycos = find_peaks_single( f, s, T, smin=smin, segwidth=segwidth, nstd=nstd, minseg=minseg, polydeg=polydeg, clrad=clrad ) for ipeak in cur_peak_indices: peak_freq = f[ipeak] peak_bins = pgram.foldbins[ipeak] peak_ducy = float(width) / peak_bins # NOTE: type enforcement is necessary, otherwise some Peak members # have np.float32 type which causes trouble down the line # NOTE 2: dm can be None on fake time series peak = Peak( freq=float(peak_freq), period=float(1.0/peak_freq), width=int(width), ducy=float(peak_ducy), iw=int(iw), ip=int(ipeak), snr=float(s[ipeak]), dm=dm) #log.debug(peak) peaks.append(peak) polycos[iw] = cur_polycos peaks = sorted(peaks, key=lambda p: p.snr, reverse=True) return peaks, polycos
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/peak_detection.py
0.91534
0.61973
peak_detection.py
pypi
import os import pprint import json from astropy.coordinates import SkyCoord import astropy.units as uu from schema import Schema, And, Or, Optional from .reading import PrestoInf, SigprocHeader SCHEMA_ITEMS = { Optional('source_name') : Or(str, None), Optional('skycoord') : Or(SkyCoord, None), Optional('dm'): Or(And(float, lambda x: x >= 0), None), Optional('mjd'): Or(And(float, lambda x: x >= 0), None), Optional('tobs'): Or(And(float, lambda x: x > 0), None), Optional('fname'): Or(str, None), # Accept any extra keys of type string with JSON-serializable values Optional(str): json.dumps } SCHEMA = Schema(SCHEMA_ITEMS, ignore_extra_keys=True) class Metadata(dict): """ A dict subclass that carries information about an observation across all data products (TimeSeries, Periodogram, etc.) The 'attrs' dictionary can only have keys of type str and json-serializable values (there are some exceptions, see below). There are also reserved keys which, if present, must match the criteria below: - source_name: str - skycoord: astropy.coordinates.Skycoord - dm: float, positive - mjd: float, positive - tobs: float, strictly positive - fname: str If any of the above keys are NOT present, they will be set to None in the Metadata object. 
""" def __init__(self, items={}): SCHEMA.validate(items) super(Metadata, self).__init__(items) for k in SCHEMA_ITEMS: if isinstance(k.schema, str): self.setdefault(k.schema, None) @classmethod def from_presto_inf(cls, inf): """ Create Metadata object from PRESTO .inf file or PrestoInf object Parameters ---------- inf : PrestoInf or str PrestoInf object or path to a PRESTO .inf file """ # Interpret 'inf' as a file path if it is a string if type(inf) == str: inf = PrestoInf(inf) attrs = dict(inf) attrs['skycoord'] = inf.skycoord attrs['fname'] = os.path.realpath(inf.fname) attrs['tobs'] = attrs['tsamp'] * attrs['nsamp'] return cls(attrs) @classmethod def from_sigproc(cls, sh, extra_keys={}): """ Create Metadata object from SIGPROC dedispersed time series file, or SigprocHeader object. Parameters ---------- sh : SigprocHeader or str SigprocHeader object or path to a PRESTO .inf file """ # Interpret 'sh' as a file path if it is a string if type(sh) == str: sh = SigprocHeader(sh, extra_keys=extra_keys) if sh['nchans'] > 1: raise ValueError(f"File {sh.fname!r} contains multi-channel data (nchans = {sh['nchans']}), instead of a dedispersed time series") # Make sure this is a 32-bit dedispersed time series # We support either: 32-bit float data, or 8-bit data but only if signedness is specified in the header nbits = sh['nbits'] if not nbits in {8, 32}: raise ValueError(f"Only 8-bit and 32-bit SIGPROC data are supported. 
File {sh.fname!r} contains {nbits}-bit data") if nbits == 8 and 'signed' not in sh: raise ValueError(f"SIGPROC Header says this is 8-bit data, but does not specify its signedness via the 'signed' key") attrs = dict(sh).copy() attrs['dm'] = attrs.get('refdm', None) attrs['skycoord'] = sh.skycoord attrs['source_name'] = attrs.get('source_name', None) attrs['mjd'] = attrs.get('tstart', None) attrs['fname'] = os.path.realpath(sh.fname) attrs['tobs'] = sh.tobs return cls(attrs) def to_dict(self): return dict(self) @classmethod def from_dict(cls, items): return cls(items) def __str__(self): return 'Metadata %s' % pprint.pformat(dict(self)) def __repr__(self): return str(self)
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/metadata.py
0.741019
0.331039
metadata.py
pypi
import logging from collections import namedtuple import numpy as np import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec import astropy.units as uu from astropy.time import Time log = logging.getLogger('riptide.candidate') class Candidate(object): """ Final data product of the riptide pipeline Attributes ---------- params : dict Dictionary with best-fit parameters of the signal: period, freq, dm, width, ducy, snr tsmeta : Metadata Metadata of the TimeSeries object (DM trial) in which the Candidate was found to have the highest S/N, and from which it was folded peaks : pandas.DataFrame A pandas DataFrame with the attributes of the periodogram peaks associated to the Candidate subints : ndarray A two-dimensional numpy array with shape (num_subints, num_bins) containing the folded sub-integrations profile : ndarray Folded profile as a one-dimensional numpy array, normalised such that the background noise standard deviation is 1, and the mean of the profile is zero dm_curve : tuple Tuple of numpy arrays (dm, snr) containing respectively the sequence of DM trials, and corresponding best S/N value across all trial widths """ def __init__(self, params, tsmeta, peaks, subints): self.params = params self.tsmeta = tsmeta self.peaks = peaks self.subints = subints def to_dict(self): """ Convert to dictionary for serialization """ return { 'params': self.params, 'tsmeta': self.tsmeta, 'peaks': self.peaks, 'subints': self.subints } @property def profile(self): if self.subints.ndim == 1: return self.subints return self.subints.sum(axis=0) @property def dm_curve(self): # NOTE: copy() works around a bug in pandas 0.23.x and earlier # https://stackoverflow.com/questions/53985535/pandas-valueerror-buffer-source-array-is-read-only # TODO: consider requiring pandas 0.24+ in the future df = self.peaks.copy().groupby('dm').max() return df.index.values, df.snr.values @classmethod def from_pipeline_output(cls, ts, peak_cluster, bins, subints=1): """ Method used by the 
pipeline to produce a candidate from intermediate data products. subints can be an int or None. None means pick the number of subints that fit inside the data. If 'subints' is too large to fit in the data, then this function will call TimeSeries.fold() with subints=None. """ centre = peak_cluster.centre P0 = centre.period if subints is not None and subints * P0 >= ts.length: msg = ( f"Period ({P0:.3f}) x requested subints ({subints:d}) exceeds time series length " f"({ts.length:.3f}), setting subints = full periods that fit in the data" ) log.debug(msg) subints = None subints_array = ts.fold(centre.period, bins, subints=subints) return cls(centre.summary_dict(), ts.metadata, peak_cluster.summary_dataframe(), subints_array) @classmethod def from_dict(cls, items): """ De-serialize from dictionary """ return cls(items['params'], items['tsmeta'], items['peaks'], items['subints']) def plot(self, figsize=(18, 4.5), dpi=80): """ Create a plot of the candidate Parameters ---------- figsize : tuple figsize argument passed to plt.figure() dpi : int dpi argument passed to plt.figure() Returns ------- fig : matplotlib.Figure """ fig = plt.figure(figsize=figsize, dpi=dpi) plot_candidate(self) return fig def show(self, **kwargs): """ Create a plot of the candidate and display it. Accepts the same keyword arguments as plot(). """ self.plot(**kwargs) plt.show() def savefig(self, fname, **kwargs): """ Create a plot of the candidate and save it as PNG under the specified file name. Accepts the same keyword arguments as plot(). 
""" fig = self.plot(**kwargs) fig.savefig(fname) plt.close(fig) def __str__(self): name = type(self).__name__ return f"{name}({self.params})" def __repr__(self): return str(self) TableEntryBase = namedtuple('TableEntry', ['name', 'value', 'formatter', 'unit']) class TableEntry(TableEntryBase): def plot(self, X, y, **kwargs): """ X : list list of X coordinates for each column y : float Y coordinate of the line """ assert(len(X) == 3) fmt = "{{:{}}}".format(self.formatter) plt.text(X[0], y, self.name, **kwargs) plt.text(X[1], y, fmt.format(self.value), ha='right', **kwargs) plt.text(X[2], y, self.unit, **kwargs) def plot_table(params, tsmeta): """ """ plt.axis('off') coord = tsmeta['skycoord'] ra_hms = coord.ra.to_string(unit=uu.hour, sep=':', precision=2, pad=True) dec_hms = coord.dec.to_string(unit=uu.deg, sep=':', precision=2, pad=True) # TODO: Check that the scale is actually UTC in the general case # PRESTO, SIGPROC and other packages may not have the same # date/time standard obsdate = Time(tsmeta['mjd'], format='mjd', scale='utc', precision=0) blank = TableEntry(name='', value='', formatter='s', unit='') entries = [ TableEntry(name='Period', value=params['period'] * 1000.0, formatter='.6f', unit='ms'), TableEntry(name='DM', value=params['dm'], formatter='.2f', unit='pc cm$^{-3}$'), TableEntry(name='Width', value=params['width'], formatter='d', unit='bins'), TableEntry(name='Duty cycle', value=params['ducy'] * 100.0, formatter='.2f', unit='%'), TableEntry(name='S/N', value=params['snr'], formatter='.1f', unit=''), blank, TableEntry(name='Source', value=tsmeta['source_name'], formatter='s', unit=''), TableEntry(name='RA', value=ra_hms, formatter='s', unit=''), TableEntry(name='Dec', value=dec_hms, formatter='s', unit=''), TableEntry(name='MJD', value=obsdate.mjd, formatter='.6f', unit=''), TableEntry(name='UTC', value=obsdate.iso, formatter='s', unit=''), ] y0 = 0.94 # Y coordinate of first line dy = 0.105 # line height X = [0.0, 0.80, 0.84] # Coordinate of 
columns name, value, unit for ii, entry in enumerate(entries): entry.plot(X, y0 - ii * dy, family='monospace') def plot_dm_curve(dm, snr): dm_min = dm.min() dm_max = dm.max() plt.plot(dm, snr, color='r', marker='o', markersize=3) # Avoid matplotlib warning when calling xlim() with two equal values if dm_min == dm_max: plt.xlim(dm_min - 0.5, dm_min + 0.5) else: plt.xlim(dm_min, dm_max) plt.grid(linestyle=':') plt.xlabel("DM (pc cm$^{-3}$)") plt.ylabel("Best S/N") def plot_subints(X, T): """ X : ndarray Sub-integrations array, shape = (nsubs, nbins) T : float Integration time in seconds """ __, nbins = X.shape X = np.hstack((X, X[:, :nbins//2])) __, nbins_ext = X.shape plt.imshow( X, cmap='Greys', interpolation='nearest', aspect='auto', extent=[-0.5, nbins_ext-0.5, T, 0] # Note: t = 0 is at the top of the plot ) plt.fill_between([nbins, nbins_ext], [0, 0], [T, T], color='b', alpha=0.08) plt.xlim(-0.5, nbins_ext-0.5) plt.ylabel("Time (seconds)") plt.title("1.5 Periods of Signal") def plot_profile(P): """ P : profile normalised to unit background noise variance """ nbins = len(P) P = np.concatenate((P, P[:nbins//2])) nbins_ext = len(P) plt.bar(range(nbins_ext), P - np.median(P), width=1, color='#404040') ymin, ymax = plt.ylim() plt.fill_between([nbins, nbins_ext], [ymin, ymin], [ymax, ymax], color='b', alpha=0.08) plt.ylim(ymin, ymax) plt.xlim(-0.5, nbins_ext-0.5) plt.xlabel("Phase bin") plt.ylabel("Normalised amplitude") def plot_candidate(cand): """ Plot candidate on the current figure """ # https://matplotlib.org/tutorials/intermediate/gridspec.html nrows, ncols = 2, 7 gs = GridSpec(nrows, ncols, figure=plt.gcf()) plt.subplot(gs[:1, 2:]) plot_subints(cand.subints, cand.tsmeta['tobs']) plt.subplot(gs[1:, 2:]) plot_profile(cand.profile) plt.subplot(gs[:1, :2]) plot_table(cand.params, cand.tsmeta) plt.subplot(gs[1:, :2]) plot_dm_curve(*cand.dm_curve) plt.tight_layout()
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/candidate.py
0.819099
0.598782
candidate.py
pypi
import os import ctypes ### Non-standard imports import numpy as np import numpy.ctypeslib as npct from numpy import log, sin, cos, exp, pi ### Local imports from .ffautils import generate_width_trials import riptide.libcpp as libcpp def generate_signal(nsamp, period, phi0=0.5, ducy=0.02, amplitude=10.0, stdnoise=1.0): """ Generate a time series containing a periodic signal with a von Mises pulse profile. This function is useful for test purposes. Parameters ---------- nsamp : int Number of samples to generate. period : float Period in number of samples. phi0 : float, optional Initial pulse phase in number of periods. ducy : float, optional Duty cycle of the pulse, i.e. the ratio FWHM / Period amplitude : float, optional True amplitude of the signal as defined in the reference paper. The *expectation* of the S/N of the generated signal is S/N_true = amplitude / stdnoise, assuming that a matched filter with the exact shape of the pulse is employed to measure S/N (here: von Mises with given duty cycle). riptide employs boxcar filters in the search, which results in a slight S/N loss. See the reference paper for details. A further degradation will be observed on bright signals, because they bias the estimation of the mean and standard deviation of the noise in a blind search. stdnoise : float, optional Standard deviation of the background noise. If set to 0, a noiseless signal is generated. Returns ------- tseries : ndarray (1D, float) Output time series. 
""" # von mises parameter kappa = log(2.0) / (2.0 * sin(pi*ducy/2.0)**2) # Generate pulse train phase_radians = (np.arange(nsamp, dtype=float) / period - phi0) * (2 * pi) signal = exp(kappa*(cos(phase_radians) - 1.0)) # Normalise to unit L2-norm, then scale by amplitude scale_factor = amplitude * (signal ** 2).sum() ** -0.5 signal *= scale_factor # Add noise if stdnoise > 0.0: noise = np.random.normal(size=nsamp, loc=0.0, scale=stdnoise) else: noise = 0.0 tseries = signal + noise return tseries def ffa2(data): """ Compute the FFA transform of a two-dimensional input Parameters ---------- data : ndarray (2D) Input time series data in two-dimensional form with shape (m, p), where m is the number of signal periods and p the number of phase bins. Returns ------- transform : ndarray (2D) The FFA transform of 'data', as a float32 2D array of shape (m, p) See Also -------- ffafreq : trial frequencies in the output transform ffaprd : trial periods in the output transform """ return libcpp.ffa2(data) def ffa1(data, p): """ Compute the FFA transform of a one-dimensional input (time series) at base period p Parameters ---------- data : ndarray (1D) Input time series data. 
If N is the total number of samples in the data, the last N % p samples are ignored, as they do not form a complete pulse period p : int Base period of the transform, in number of samples Returns ------- transform : ndarray (2D) The FFA transform of 'data', as a float32 2D array of shape (m, p), where m is the number of complete pulse periods in the data See Also -------- ffafreq : trial frequencies in the output transform ffaprd : trial periods in the output transform """ if not data.ndim == 1: raise ValueError("input data must be one-dimensional") if not (isinstance(p, int) and p > 0): raise ValueError("p must be an integer > 1") if p > data.size: raise ValueError("p must be smaller than the total number of samples") m = data.size // p return ffa2(data[:m*p].reshape(m, p)) def ffafreq(N, p, dt=1.0): """ Returns the trial frequencies that correspond to every folded profile in the FFA output. Parameters ---------- N : int Total number of samples in the input data p : int Base period of the FFA transform in number of samples dt : float, optional Sampling time Returns ------- freqs : ndarray Array with m elements containing the sequence of trial frequencies in the FFA output """ if not (isinstance(N, int) and N > 0): raise ValueError("N must be a strictly positive integer") if not (isinstance(p, int) and p > 1): raise ValueError("p must be an integer > 1") if not N >= p: raise ValueError("p must be smaller than (or equal to) N") if not dt > 0: raise ValueError("dt must be strictly positive") f0 = 1.0 / p m = N // p if m == 1: f = np.asarray([f0]) else: s = np.arange(m) f = (f0 - s / (m-1.0) * f0**2) f /= dt return f def ffaprd(N, p, dt=1.0): """ Returns the trial periods that correspond to every folded profile in the FFA output. 
Parameters ---------- N : int Total number of samples in the input data p : int Base period of the FFA transform in number of samples dt : float, optional Sampling time Returns ------- periods : ndarray Array with m elements containing the sequence of trial periods """ return 1.0 / ffafreq(N, p, dt=dt) def boxcar_snr(data, widths, stdnoise=1.0): """ Compute the S/N ratio of pulse profile(s) for a range of boxcar width trials. Parameters ---------- data : ndarray Input profile(s). Can be of any shape, but the last axis must be pulse phase. widths : ndarray, 1D Trial pulse widths, expressed in number of phase bins. stdnoise : float Standard deviation of the background noise in all profiles. Returns ------- snr : ndarray Output with the same shape as data, with an additional axis which represents trial pulse width index. """ widths = np.asarray(widths, dtype=np.uint64) # Number of bins is the length of the last axis b = data.shape[-1] # Input to C++ function must be 2D cppinput = data.reshape(-1, b).astype(np.float32) m = cppinput.shape[0] snr = libcpp.snr2(cppinput, widths, stdnoise) shape = list(data.shape[:-1]) + [widths.size] return snr.reshape(shape) def downsample(data, factor): """ Downsample an array by a real-valued factor. Parameters ---------- data : array_like Time series data to downsample. factor : float Downsampling factor. Returns ------- out : ndarray, float32 Downsampled data. """ return libcpp.downsample(data, factor)
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/libffa.py
0.885012
0.697016
libffa.py
pypi
import numpy as np import riptide.libcpp def running_median(x, width_samples): """ Computes the running median of data with the specified window size. Parameters ---------- x : ndarray One dimensional input data. width_samples : int The width of the running median window in number of elements. It must be an odd number smaller than the input data length, otherwise ValueError is raised. Returns ------- rmed : ndarray The running median of 'x'. Notes ----- The C++ running median code internally pads both ends of the arrray with the edge values. If the input array is not contiguous in memory, a temporary contiguous copy is made and passed to the C++ function (which only accepts C-contiguous arrays). Otherwise no performance hit is incurred. See Also -------- fast_running_median : an approximate running median that runs much faster with large window sizes (> 100 elements). """ return riptide.libcpp.running_median(np.ascontiguousarray(x), width_samples) def scrunch(data, factor): """ Reduce the resolution of data by adding consecutive elements together """ factor = int(factor) N = (data.size // factor) * factor return data[:N].reshape(-1, factor).mean(axis=1) def fast_running_median(data, width_samples, min_points=101): """ Compute an approximate running median of data over large window sizes. The idea is to downsample the data (if necessary), call running_median() on it and linearly interpolate it back to the original resolution. Parameters ---------- data : ndarray Input data width : int Required width of the running median window in number of samples min_points : int The running median is calculated of a time scrunched version of the input data to save time: min_points is the minimum number of scrunched samples that must fit in the running median window. Lower values make the running median calculation less accurate but faster, due to allowing a higher scrunching factor. NOTE: 'min_points' must be an odd number. 
See Also -------- running_median : an exact running median but slower for large window sizes """ if not (min_points % 2): raise ValueError("min_points must be an odd number") scrunch_factor = int(max(1, width_samples / float(min_points))) if (scrunch_factor == 1): return running_median(data, width_samples) scrunched_data = scrunch(data, scrunch_factor) rmed_lores = running_median(scrunched_data, min_points) x_lores = np.arange(scrunched_data.size) * scrunch_factor + 0.5 * (scrunch_factor - 1) return np.interp(np.arange(data.size), x_lores, rmed_lores)
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/running_medians.py
0.869146
0.79909
running_medians.py
pypi
import pandas class PeakCluster(list): """ Basic list subclass to store a cluster of Peak objects Parameters ---------- peaks: iterable List or iterable of Peak objects rank: int or None, optional Rank within the search, 0 means brightest (default: None) parent_fundamental: PeakCluster or None, optional Parent fundamental PeakCluster, can be set later by the harmonic flagging procedure. None means that the cluster has no parent, and is thus a fundamental itself. (default: None) hfrac: fractions.Fraction or None, optional If there is a parent fundamental, this is the ratio between the cluster's frequency and its fundamental's frequency (default: None) """ def __init__(self, peaks, rank=None, parent_fundamental=None, hfrac=None): super(PeakCluster, self).__init__(peaks) self.rank = rank self.parent_fundamental = parent_fundamental self.hfrac = hfrac @property def is_harmonic(self): return self.parent_fundamental is not None @property def centre(self): return max(self, key=lambda peak: peak.snr) def summary_dataframe(self): """ Returns a pandas.DataFrame with the parameters of the member Peak objects, where the columns are the keys of the dictionary returned by the Peak.summary_dict() method """ return pandas.DataFrame.from_dict([ peak.summary_dict() for peak in self ]) def summary_dict(self): """ """ return { **self.centre.summary_dict(), 'npeaks': len(self), # NOTE: we set some default values when there is no fundamental, instead of None # This is to work around a limitation of pandas.DataFrame where columns with missing # values MUST be of type float, and we want type 'int' for these 'rank': self.rank, 'hfrac_num': self.hfrac.numerator if self.is_harmonic else 0, 'hfrac_denom': self.hfrac.denominator if self.is_harmonic else 0, 'fundamental_rank': self.parent_fundamental.rank if self.is_harmonic else self.rank } def __str__(self): name = type(self).__name__ return f"{name}(size={len(self)}, centre={self.centre})" def __repr__(self): return str(self) def 
clusters_to_dataframe(clusters): """ Convert list of PeakCluster objects to a pandas DataFrame with a summary of their attributes, including harmonic parameters. The output is sorted by decreasing snr. """ clusters = sorted(clusters, key=lambda c: c.centre.snr, reverse=True) df = pandas.DataFrame.from_dict([cl.summary_dict() for cl in clusters]) # Re-order columns columns = ['rank', 'period', 'dm', 'snr', 'ducy', 'freq', 'npeaks', 'hfrac_num', 'hfrac_denom', 'fundamental_rank'] df = df[columns] return df
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/pipeline/peak_cluster.py
0.93426
0.640397
peak_cluster.py
pypi
import logging
import multiprocessing

from riptide import TimeSeries, ffa_search, find_peaks

log = logging.getLogger('riptide.worker_pool')


class WorkerPool(object):
    """
    Searches a batch of time series files in parallel worker processes.

    deredden_params : dict
    range_confs : list of dicts
        List of dicts from the 'ranges' section of the YAML config file
    loader : func
        Function that takes a file path as its only argument, and returns
        a TimeSeries object
    processes : int
        Number of parallel processes
    fmt : str
        TimeSeries file format
    """
    TIMESERIES_LOADERS = {
        'sigproc': TimeSeries.from_sigproc,
        'presto': TimeSeries.from_presto_inf
    }

    def __init__(self, deredden_params, range_confs, processes=1, fmt='presto'):
        self.deredden_params = deredden_params
        self.range_confs = range_confs
        self.loader = self.TIMESERIES_LOADERS[fmt]
        self.processes = int(processes)

    def process_fname_list(self, fnames):
        # Fan the file names out to the worker processes; each worker returns
        # a list of Detections for its file
        pool = multiprocessing.Pool(processes=self.processes)
        per_file_results = pool.map(self.process_fname, fnames)
        # NOTE: don't forget to close the pool to free up RAM
        # NOTE: and don't forget to join, otherwise the coverage module
        # does not properly report coverage for sub-processes spawned by
        # the pool
        pool.close()
        pool.join()

        # Flatten the per-file detection lists into a single list
        flattened = []
        for detections in per_file_results:
            flattened.extend(detections)
        return flattened

    def process_fname(self, fname):
        found = []
        series = self.loader(fname)
        dm_value = series.metadata['dm']
        log.debug("Searching DM = {:.3f}".format(dm_value))

        # Make pre-processing common to all ranges to save time
        series = series.deredden(
            self.deredden_params['rmed_width'],
            minpts=self.deredden_params['rmed_minpts']
        )
        series = series.normalise()

        for range_conf in self.range_confs:
            search_kwargs = dict(range_conf['ffa_search'])
            # Pre-processing was already done above, once for all ranges
            search_kwargs.update({
                'deredden': False,
                'already_normalised': True
            })
            tsdr, pgram = ffa_search(series, **search_kwargs)
            peaks, polycos = find_peaks(pgram, **range_conf['find_peaks'])
            found.extend(peaks)
            del tsdr, pgram, peaks, polycos  # Free RAM ASAP

        log.debug(f"Done searching DM = {dm_value:.3f}, peaks found: {len(found)}")
        return found
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/pipeline/worker_pool.py
0.648132
0.162081
worker_pool.py
pypi
import logging
import argparse
import numpy as np
import pandas
from riptide import __version__, TimeSeries, ffa_search, find_peaks
from riptide.clustering import cluster1d

log = logging.getLogger('riptide.rseek')

# argparse help formatter that shows default values and widens the help column
help_formatter = lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog, max_help_position=16)


def get_parser():
    # Builds the command-line interface of the 'rseek' console script
    parser = argparse.ArgumentParser(
        formatter_class=help_formatter,
        description=(
            "FFA search a single time series and print a table of parameters of all significant peaks found."
            " Peaks found with nearly identical periods at different trial pulse widths are grouped,"
            " but no harmonic filtering is performed."
        )
    )
    parser.add_argument(
        "-f", "--format", type=str, choices=('presto', 'sigproc'), required=True,
        help="Input TimeSeries format"
    )
    parser.add_argument(
        "--Pmin", type=float, default=1.0,
        help="Minimum trial period in seconds"
    )
    parser.add_argument(
        "--Pmax", type=float, default=10.0,
        help="Maximum trial period in seconds"
    )
    parser.add_argument(
        "--bmin", type=int, default=240,
        help="Minimum number of phase bins used in the search"
    )
    parser.add_argument(
        "--bmax", type=int, default=260,
        help="Maximum number of phase bins used in the search"
    )
    parser.add_argument(
        "--smin", type=float, default=7.0,
        help="Only report peaks above this minimum S/N"
    )
    parser.add_argument(
        "--wtsp", type=float, default=1.5,
        help="Geometric factor between consecutive trial pulse widths"
    )
    parser.add_argument(
        "--rmed_width", type=float, default=4.0,
        help="Width (in seconds) of the running median filter to subtract from the input data before processing"
    )
    # NOTE(review): type=float with an integer default; presumably kept as float
    # for historical CLI compatibility — confirm before tightening to int
    parser.add_argument(
        "--rmed_minpts", type=float, default=101,
        help=(
            "The running median is calculated of a time scrunched version of the"
            " input data to save time: rmed_minpts is the minimum number of"
            " scrunched samples that must fit in the running median window"
            " Lower values make the running median calculation less accurate but"
            " faster, due to allowing a higher scrunching factor"
        )
    )
    parser.add_argument(
        "--clrad", type=float, default=0.2,
        help=(
            "Frequency clustering radius in units of 1/Tobs. Peaks with similar"
            " freqs are grouped together, and only the brightest one of the group"
            " is printed"
        )
    )
    parser.add_argument(
        "fname", type=str,
        help="Input file name"
    )
    parser.add_argument(
        '--version', action='version', version=__version__
    )
    return parser


def run_program(args):
    """
    Run the rseek program and return a pandas DataFrame with the detected peak
    parameters, or None if no significant peaks were found. This is used to
    check the results in unit tests.

    Parameters
    ----------
    args : list
        List of command line arguments

    Returns
    -------
    peaks : pandas.DataFrame
        DataFrame with columns: 'period', 'freq', 'width', 'ducy', 'dm', 'snr'
    """
    logging.basicConfig(
        level='DEBUG',
        format='%(asctime)s %(filename)18s:%(lineno)-4s %(levelname)-8s %(message)s'
    )
    # Dispatch to the right TimeSeries reader based on --format
    LOADERS = {
        'sigproc': TimeSeries.from_sigproc,
        'presto' : TimeSeries.from_presto_inf
    }
    loader = LOADERS[args.format]

    # Search and find peaks
    ts = loader(args.fname)
    log.debug(f"Searching period range [{args.Pmin}, {args.Pmax}] seconds with {args.bmin} to {args.bmax} phase bins")
    __, pgram = ffa_search(
        ts,
        period_min=args.Pmin,
        period_max=args.Pmax,
        bins_min=args.bmin,
        bins_max=args.bmax,
        rmed_width=args.rmed_width,
        rmed_minpts=args.rmed_minpts,
        wtsp=args.wtsp,
        fpmin=1,  # No dynamic cap on period_max
        ducy_max=0.3
    )
    peaks, __ = find_peaks(pgram, smin=args.smin, clrad=args.clrad)

    if not peaks:
        print(f"No peaks found above S/N = {args.smin:.2f}")
        return None

    # Cluster peaks, i.e. for each period keep only the trial width
    # that yields the highest S/N
    freqs = np.asarray([p.freq for p in peaks])
    cluster_indices = cluster1d(freqs, r=args.clrad/ts.length)
    peaks = [
        max([peaks[ii] for ii in indices], key=lambda p: p.snr)
        for indices in cluster_indices
    ]
    # Brightest peak first
    peaks = sorted(peaks, key=lambda p: p.snr, reverse=True)

    # DataFrame constructs from namedtuples nicely
    df = pandas.DataFrame(peaks)
    # iw/ip are internal trial indices, not useful in the report
    df = df.drop(columns=['iw', 'ip'])

    # Print this in a pleasing way
    # https://stackoverflow.com/questions/20937538/how-to-display-pandas-dataframe-of-floats-using-a-format-string-for-columns
    # NOTE: we have inserted a leading space to each format string on purpose
    # This makes the output table more readable
    formatters = {
        'period': ' {:.9f}'.format,
        'freq': ' {:.9f}'.format,
        'ducy': lambda x: ' {:#.2f}%'.format(100 * x),
        'dm': ' {:.2f}'.format,
        'snr': ' {:.1f}'.format,
    }
    output = df.to_string(
        columns=['period', 'freq', 'width', 'ducy', 'dm', 'snr'],
        formatters=formatters,
        index=False
    )
    print(output)
    return df


def main():
    """ Console script entry point for 'rseek' """
    args = get_parser().parse_args()
    run_program(args)


if __name__ == "__main__":
    main()
/riptide-ffa-0.2.4.tar.gz/riptide-ffa-0.2.4/riptide/apps/rseek.py
0.801819
0.253431
rseek.py
pypi
import warnings
from collections import OrderedDict
from typing import List

from dotenv import dotenv_values
from schema import Schema, Optional, Or

from configcrunch import YamlConfigDocument, ConfigcrunchError
from configcrunch import variable_helper
from riptide.config.document.common_service_command import ContainerDefinitionYamlConfigDocument
from riptide.config.errors import RiptideDeprecationWarning
from riptide.config.files import CONTAINER_SRC_PATH
# NOTE(review): these wildcard imports appear to provide os, PurePosixPath,
# TYPE_CHECKING, Dict, process_config and the logging path helpers used
# below — confirm against riptide.config.service.config_files / .logging
from riptide.config.service.config_files import *
from riptide.config.service.logging import *

# todo: validate actual schema values -> better schema | ALL documents
from riptide.config.service.ports import get_additional_port
from riptide.config.service.volumes import process_additional_volumes
from riptide.db.driver import db_driver_for_service
from riptide.lib.cross_platform import cppath

# Separator between project name and service name in generated proxy domains
DOMAIN_PROJECT_SERVICE_SEP = "--"

if TYPE_CHECKING:
    from riptide.config.document.project import Project
    from riptide.config.document.app import App

HEADER = 'service'


class Service(ContainerDefinitionYamlConfigDocument):
    """
    A service document. Represents the definition and specification for a running service container.

    Placed inside an :class:`riptide.config.document.app.App`.

    The name of the service comes from the key it is assigned in the app.
    This key is added to the service with the ``$name`` entry during runtime.
    """

    @classmethod
    def header(cls) -> str:
        """Returns the YAML document header keyword for services."""
        return HEADER

    @classmethod
    def schema(cls) -> Schema:
        """
        [$name]: str
            Name as specified in the key of the parent app.

            Added by system. DO NOT specify this yourself in the YAML files.

        [roles]: List[str]
            A list of roles for this service. You can use arbitrary strings and get services by their
            assigned roles using :func:`~riptide.config.document.app.App.get_service_by_role`.

            Some roles are pre-defined and have a special meaning:

            *main*:
                This service is the main service for the app.

                Some commands will default to this service and the proxy URL for this service is shorter.
                Usually services are accessible via ``http://<project_name>--<service_name>.<proxy_url>``,
                however the main service is accessible via ``http://<project_name>.<proxy_url>``.

                Only one service is allowed to have this role.

            *src*:
                The container of this service will have access to the source code of the application.

                Its working directory will be set accordingly.

            *db*:
                This service is the primary database. A database driver has to be set (see key ``driver``).

                This service is then used by Riptide for `database management </user_docs/db.html>`.

        image: str
            Docker Image to use

        [command]: str or map
            If this is not set: The default command in the image is used and considered in the "default"
            command group (see below).

            If it is a string: Command to run inside of the container. Defaults to command defined in image.
            This command will be in the "default" command group (see below).

            If it is a map: A list of commands that this service supports. Keys are the "command group", values
            the commands to run. Each service must have a command defined for the "default" command group.
            You can specify a command group to use when using `riptide start`. Default is the "default" command
            group, this one is also used by the Riptide Proxy autostart feature.
            For more information on this see the `--cmd` flag of `riptide start`.

            Example::

                command:
                    default: "npm run default"
                    debug: "npm run debug"

            .. warning:: Avoid quotes (", ') inside of commands, as those may lead to strange side effects.

        [port]: int
            HTTP port that the web service is accessible under. This port will be used by the proxy server
            to redirect the traffic.

            If the port is not specified, the service is not accessible via proxy server.

        [logging]
            Logging settings. All logs will be placed inside the "_riptide/logs" directory.

            [stdout]: bool
                Whether or not to log the stdout stream of the container's main command. Default: false

            [stderr]: bool
                Whether or not to log the stderr stream of the container's main command. Default: false

            [paths]
                {key}: str
                    Additional text files to mount into the logging directory.

                    Keys are filenames on host (without .log) and values are the paths inside the containers.

            [commands]
                {key}: str
                    Additional commands to start inside the container. Their stdout and stderr will be logged
                    to the file specified by the key.

        [pre_start]: List[str]
            List of commands to run, before the container starts. They are run sequentially.
            The startup will wait for the commands to finish. Exit codes (failures) are ignored.
            Each of these commands is run in a separate container based on the service specification.
            Each command is run in a "sh" shell.

        [post_start]: List[str]
            List of commands to run, after container starts. They are run sequentially.
            The startup will wait for the commands to finish. Exit codes (failures) are ignored.
            Each of these commands is run inside the service container (equivalent of ``docker exec``).
            Each command is run in a "sh" shell.

        [environment]
            Additional environment variables

            {key}: str
                Key is the name of the variable, value is the value.

        [working_directory]: str
            Working directory for the service, either

            - absolute, if an absolute path is given
            - relative to the src specified in the project, if the role "src" is set.
            - relative to the default working directory from the image, if the role is not set.

            Defaults to ``.``.

        [config]
            Additional configuration files to mount. These files are NOT directly mounted.
            Instead they are processed and the resulting file is mounted.

            All variables and variable helpers inside the configuration file are processed.

            Processed config files are either written to _riptide/processed_config and mounted to containers
            or (if they are under the source tree of the project and the service has the role 'src') copied
            to the path in the project and mounted with the rest of the source tree.
            A '.riptide_info.txt' is added then to explain the origin of this file.

            Example configuration file (demo.ini)::

                [demo]
                domain={{domain()}}
                project_name={{parent().parent().name}}

            Resulting file that will be mounted::

                [demo]
                domain=projectname.riptide.local
                project_name=projectname

            {key}
                from: str
                    Path to the configuration file, relative to any YAML file that was used
                    to load the project (including "riptide.yml" and all yaml files used inside
                    the repository; all are searched). Absolute paths are not allowed.

                to: str
                    Path to store the configuration file at, relative to working directory of
                    container or absolute.

                [force_recreate: bool]
                    False by default. If false, command containers that use this config file will
                    not try to recreate the processed file if it already exists. If true command
                    containers will also recreate the file every time they are started.

                    Started services always recreate the processed file on start, regardless of
                    this setting.

        [additional_subdomains]: List[str]
            List of additional subdomains that will be made available on the host system.

        [additional_ports]
            Additional TCP and/or UDP ports that will be made available on the host system.
            For details see section in
            `user guide </user_docs/7_working_with_riptide.html#access-other-tcp-udp-ports>`_.

            {key}
                title: str
                    Title for this port, will be displayed in ``riptide status``

                container: int
                    Port number inside the container

                host_start: int
                    First port number on host that Riptide will try to reserve, if the
                    port is already occupied, the next one will be used. This port
                    will be reserved and permanently used for this service after that.

        [additional_volumes]
            Additional volumes to mount into the container for this command.

            {key}
                host: str
                    Path on the host system to the volume. Avoid hardcoded absolute paths.

                container: str
                    Path inside the container (relative to src of Project or absolute).

                [mode]: str
                    Whether to mount the volume read-write ("rw", default) or read-only ("ro").

                [type]: str
                    Whether this volume is a "directory" (default) or a "file". Only checked if
                    the file/dir does not exist yet on the host system. Riptide will then create
                    it with the appropriate type.

                [volume_name]: str
                    Name of a named volume for this additional volume. Used instead of "host" if
                    present and the dont_sync_named_volumes_with_host performance setting is
                    enabled. Volumes with the same volume_name have the same content, even across
                    projects. As a constraint, the name of two volumes should only be the same,
                    if the host path specified is also the same, to ensure the same behaviour
                    regardless of if the performance setting is enabled.

        [driver]
            The database driver configuration, set this only if the role "db" is set.

            Detailed documentation can be found in a
            `separate section </config_docs/database_drivers.html>`_.

            name: str
                Name of the database driver, must be installed.

            config: ???
                Specification depends on the database driver.

        [run_as_current_user]: bool
            Whether to run as the user using riptide (True)
            or image default (False).

            Default: True

            Riptide will always create the user and group, matching the host user and group,
            inside the container on startup, regardless of this setting.

            Some images don't support switching the user, set this to false then.
            Please note that, if you set this to false and also specify the role 'src', you may
            run into permission issues.

        [run_pre_start_as_current_user]: 'auto' or bool
            Whether to run pre start commands as the user using riptide or image default.
            Default is 'auto' which means the value of `run_as_current_user` will be used.

        [run_post_start_as_current_user]: 'auto' or bool
            Whether to run post start commands as the user using riptide or image default.
            Default is 'auto' which means the value of `run_as_current_user` will be used.

        [allow_full_memlock]: bool
            Whether to set memlock ulimit to -1:-1 (soft:hard). This is required for some
            database services, such as Elasticsearch.

            Note that engines might ignore this setting, if they don't support it.

            Default: False

        [read_env_file]: bool
            If enabled, read the environment variables in the env-files defined in the project
            (``env_files``).

            Default: True

        **Example Document:**

        .. code-block:: yaml

            service:
              image: node:10
              roles:
                - main
                - src
              command: 'node server.js'
              port: 1234
              logging:
                stdout: true
                stderr: false
                paths:
                  one: '/foo/bar'
                commands:
                  two: 'varnishlog'
              pre_start:
                - "echo 'command 1'"
                - "echo 'command 2'"
              post_start:
                - "echo 'command 3'"
                - "echo 'command 4'"
              environment:
                SOMETHING_IMPORTANT: foo
              config:
                one:
                  from: ci/config.yml
                  to: app_config/config.yml
              working_directory: www
              additional_subdomains:
                - something
                - foo
              additional_ports:
                one:
                  title: MySQL Port
                  container: 3306
                  host_start: 3006
              additional_volumes:
                temporary_files:
                  host: '{{ get_tempdir() }}'
                  container: /tmp
        """
        return Schema(
            {
                Optional('$ref'): str,  # reference to other Service documents
                Optional('$name'): str,  # Added by system during processing parent app.
                Optional('roles'): [str],
                'image': str,
                Optional('command'): Or(
                    str,
                    {
                        "default": str,
                        str: str
                    }
                ),
                Optional('port'): int,
                Optional('logging'): {
                    Optional('stdout'): bool,
                    Optional('stderr'): bool,
                    Optional('paths'): {str: str},
                    Optional('commands'): {str: str}
                },
                Optional('pre_start'): [str],
                Optional('post_start'): [str],
                Optional('environment'): {str: str},
                Optional('config'): {
                    str: {
                        'from': str,
                        # Path to the document that "from" references. Is added during loading of service
                        '$source': str,
                        'to': str,
                        Optional('force_recreate'): bool
                    }
                },
                # Whether to run as the user using riptide (True) or image default (False). Default: True
                # Limitation: If false and the image USER is not root,
                # then a user with the id of the image USER must exist in /etc/passwd of the image.
                Optional('run_as_current_user'): bool,
                Optional('run_pre_start_as_current_user'): Or('auto', bool),
                Optional('run_post_start_as_current_user'): Or('auto', bool),
                # DEPRECATED. Inverse of run_as_current_user if set
                Optional('run_as_root'): bool,
                # Whether to create the riptide user and group, mapped to current user. Default: False
                Optional('dont_create_user'): bool,
                Optional('working_directory'): str,
                Optional('additional_subdomains'): [str],
                Optional('additional_ports'): {
                    str: {
                        'title': str,
                        'container': int,
                        'host_start': int
                    }
                },
                Optional('additional_volumes'): {
                    str: {
                        'host': str,
                        'container': str,
                        Optional('mode'): Or('rw', 'ro'),  # default: rw - can be rw/ro.
                        Optional('type'): Or('directory', 'file'),  # default: directory
                        Optional('volume_name'): str
                    }
                },
                Optional('allow_full_memlock'): bool,
                # db only
                Optional('driver'): {
                    'name': str,
                    'config': any  # defined by driver
                },
                Optional('read_env_file'): bool
            }
        )

    def _initialize_data_after_merge(self, data):
        """
        Initializes non-set fields, initializes the database driver and creates
        all files for ``config`` entries.
        """
        self._db_driver = None
        self._loaded_port_mappings = None

        # Migrate the deprecated run_as_root key to run_as_current_user (inverted meaning)
        if "run_as_root" in data:
            warnings.warn(
                "Deprecated key run_as_root = %r in a service found. Please replace with run_as_current_user = %r."
                % (data["run_as_root"], not data["run_as_root"]),
                RiptideDeprecationWarning
            )
            data["run_as_current_user"] = not data["run_as_root"]

        # Apply defaults for all optional keys that the rest of the code expects to exist
        if "run_as_current_user" not in data:
            data["run_as_current_user"] = True

        # 'auto' means: inherit the value of run_as_current_user
        if "run_pre_start_as_current_user" not in data or data["run_pre_start_as_current_user"] == "auto":
            data["run_pre_start_as_current_user"] = data["run_as_current_user"]

        if "run_post_start_as_current_user" not in data or data["run_post_start_as_current_user"] == "auto":
            data["run_post_start_as_current_user"] = data["run_as_current_user"]

        if "dont_create_user" not in data:
            data["dont_create_user"] = False

        if "pre_start" not in data:
            data["pre_start"] = []

        if "post_start" not in data:
            data["post_start"] = []

        if "roles" not in data:
            data["roles"] = []

        if "working_directory" not in data:
            data["working_directory"] = "."

        if "read_env_file" not in data:
            data["read_env_file"] = True

        if "additional_subdomains" not in data:
            data["additional_subdomains"] = []

        if "db" in data["roles"]:
            self._db_driver = db_driver_for_service.get(data, self)
            if self._db_driver:
                # Collect additional ports for the db driver
                # (user-defined ports take precedence over driver-provided ones)
                my_original_ports = data["additional_ports"] if "additional_ports" in data else {}
                db_ports = self._db_driver.collect_additional_ports()
                data["additional_ports"] = db_ports.copy()
                data["additional_ports"].update(my_original_ports)

        # Load the absolute path of the config documents specified in config[]["from"]
        if self.absolute_paths:
            folders_to_search = [os.path.dirname(path) for path in self.absolute_paths]
        else:
            try:
                folders_to_search = [self.get_project().folder()]
            except IndexError:
                # Fallback: Assume cwd
                folders_to_search = [os.getcwd()]

        if "config" in data and isinstance(data["config"], dict):
            for config in data["config"].values():
                # sanity check if from and to are in this config entry, if not it's invalid.
                # the validation will catch this later
                if "from" not in config or "to" not in config:
                    continue
                # Doesn't allow . or os.sep at the beginning for security reasons.
                if config["from"].startswith(".") or config["from"].startswith(os.sep):
                    raise ConfigcrunchError(f"Config 'from' items in services may not start with . or {os.sep}.")
                config["$source"] = None
                # First matching folder wins
                for folder in folders_to_search:
                    path_to_config = os.path.join(folder, config["from"])
                    if os.path.exists(path_to_config):
                        config["$source"] = path_to_config
                        break
                if config["$source"] is None:
                    # Did not find the file at any of the possible places
                    p = self.absolute_paths[0] if self.absolute_paths else '???'
                    raise ConfigcrunchError(
                        f"Configuration file '{config['from']}' in service at '{p}' does not exist or is not a file. "
                        f"This probably happens because one of your services has an invalid setting for the 'config' "
                        f"entries. Based on how the configuration was merged, the following places were searched: "
                        f"{str(folders_to_search)}"
                    )

        return data

    def _initialize_data_after_variables(self, data):
        """
        Normalizes all host-paths to only use the system-type directory separator.
        """
        if "additional_volumes" in data:
            for obj in data["additional_volumes"].values():
                obj["host"] = cppath.normalize(obj["host"])
        if "config" in data:
            for obj in data["config"].values():
                obj["$source"] = cppath.normalize(obj["$source"])
        return data

    def validate(self) -> bool:
        """
        Validates the Schema and if a database driver is defined, validates
        that the driver is installed.
        """
        if not super().validate():
            return False

        # Db Driver constraints. If role db is set, a "driver" has to be set and code has to exist for it.
        if self.internal_contains("roles") and "db" in self.internal_get("roles"):
            if not self.internal_contains("driver") or self._db_driver is None:
                raise ConfigcrunchError(
                    f"Service {self.internal_get('$name')} validation: "
                    f"If a service has the role 'db' it has to have a valid "
                    f"'driver' entry with a driver that is available."
                )
            with self.internal_access():
                self._db_driver.validate_service()
        return True

    def before_start(self):
        """Loads data required for service start, called by riptide_project_start_ctx()"""
        # Collect ports
        project = self.get_project()
        self._loaded_port_mappings = {}

        if "additional_ports" in self:
            for port_request in self["additional_ports"].values():
                self._loaded_port_mappings[port_request["container"]] = \
                    get_additional_port(project, self, port_request["host_start"])

        # Create working_directory if it doesn't exist and it is relative
        if "working_directory" in self and not PurePosixPath(self["working_directory"]).is_absolute():
            os.makedirs(os.path.join(
                self.get_project().folder(),
                self.get_project()["src"],
                self["working_directory"]
            ), exist_ok=True)

    def get_command(self, group: str = "default"):
        """Returns the command to use for the given group. 'command' must be set in self"""
        if "command" not in self:
            raise ValueError("No command defined.")
        if isinstance(self["command"], dict):
            # Fall back to the "default" group if the requested group doesn't exist
            if group in self["command"]:
                return self["command"][group]
            return self["command"]["default"]
        else:
            return self["command"]

    def get_project(self) -> 'Project':
        """
        Returns the project or raises an error if this is not assigned to a project

        :raises: IndexError: If not assigned to a project
        """
        try:
            # service -> app -> project
            return self.parent_doc.parent_doc
        except Exception as ex:
            raise IndexError("Expected service to have a project assigned") from ex

    def collect_volumes(self) -> OrderedDict:
        """
        Collect volume mappings that this service should be getting when running.

        Volumes are built from following sources:

        * Source code is mounted as volume if role "src" is set
        * Config entries are compiled using Jinja and mounted to their paths
        * Logging files/streams are put into the _riptide/logs folder.
        * If role "db" is set, and a database driver is found, its volumes are added
        * additional_volumes are added.

        Also creates/updates necessary files and folders
        (eg. compiled configuration, logging).

        :return: dict. Return format is the docker container API volumes dict format.
                 See: https://docker-py.readthedocs.io/en/stable/containers.html#docker.models.containers.ContainerCollection.run
                 The volume definitions may contain an additional key 'name', which should be
                 used by the engine, instead of the host path if the
                 dont_sync_named_volumes_with_host performance option is enabled.
        """
        project = self.get_project()
        volumes = OrderedDict({})

        # role src
        if "src" in self["roles"]:
            volumes[project.src_folder()] = {'bind': CONTAINER_SRC_PATH, 'mode': 'rw'}

        # config
        if "config" in self:
            for config_name, config in self["config"].items():
                bind_path = str(PurePosixPath('/src/').joinpath(PurePosixPath(config["to"])))
                process_config(volumes, config_name, config, self, bind_path)

        # logging
        if "logging" in self:
            create_logging_path(self)
            if "stdout" in self["logging"] and self["logging"]["stdout"]:
                volumes[get_logging_path_for(self, 'stdout')] = {'bind': LOGGING_CONTAINER_STDOUT, 'mode': 'rw'}
            if "stderr" in self["logging"] and self["logging"]["stderr"]:
                volumes[get_logging_path_for(self, 'stderr')] = {'bind': LOGGING_CONTAINER_STDERR, 'mode': 'rw'}
            if "paths" in self["logging"]:
                for name, path in self["logging"]["paths"].items():
                    logging_host_path = get_logging_path_for(self, name)
                    volumes[logging_host_path] = {'bind': path, 'mode': 'rw'}
            if "commands" in self["logging"]:
                for name in self["logging"]["commands"].keys():
                    logging_host_path = get_logging_path_for(self, name)
                    logging_command_stdout = get_command_logging_container_path(name)
                    volumes[logging_host_path] = {'bind': logging_command_stdout, 'mode': 'rw'}

        # db driver
        if self._db_driver:
            db_driver_volumes = self._db_driver.collect_volumes()
            for vol in db_driver_volumes.keys():
                # Create db driver volumes as directories if they don't exist yet
                os.makedirs(vol, exist_ok=True)
            volumes.update(db_driver_volumes)

        # additional_volumes
        if "additional_volumes" in self:
            volumes.update(process_additional_volumes(list(self['additional_volumes'].values()), project.folder()))

        return volumes

    def collect_environment(self) -> dict:
        """
        Collect environment variables from the "environment" entry in the service
        configuration.

        Additionally, all configurations in the ``.env`` file in the project folder
        are also passed to the container (if ``read_env_file`` is True).

        Environment priority:

        - Environment variables defined in the ``environment`` of the command.
        - Environment variables of the ``.env`` file.
        - If database: Environment variables provided by the database driver.

        :return: dict. Returned format is ``{key1: value1, key2: value2}``.
        """
        env = {}
        if "environment" in self:
            for name, value in self["environment"].items():
                env[name] = value

        if "read_env_file" not in self or self["read_env_file"]:
            for env_file_path in self.get_project()['env_files']:
                env.update(dotenv_values(os.path.join(self.get_project().folder(), env_file_path)))

        # db driver
        if self._db_driver:
            env.update(self._db_driver.collect_environment())

        return env

    def collect_ports(self) -> dict:
        """
        Takes additional_ports and returns the actual host/container mappings for
        these ports.

        The resulting host parts are system-unique, so Riptide will not assign a
        port twice across multiple projects/services. To achieve this, port
        bindings are saved into $CONFIG_DIR/ports.json.

        :return: dict. Returned format is {port_service1: port_host1, port_service2: port_host2}
        """
        # This is already loaded in before_start. Make sure to use riptide_start_project_ctx
        # when starting if this is None
        return self._loaded_port_mappings

    def error_str(self) -> str:
        # Human-readable identifier used in error messages
        return f"{self.__class__.__name__}<{(self.internal_get('$name') if self.internal_contains('$name') else '???')}>"

    @variable_helper
    def parent(self) -> 'App':
        """
        Returns the app that this service belongs to.

        Example usage::

            something: '{{ parent().notices.usage }}'

        Example result::

            something: 'This is easy to use.'
        """
        # noinspection PyTypeChecker
        return super().parent()

    @variable_helper
    def volume_path(self) -> str:
        """
        Returns the (host) path to a service-unique directory for storing container data.

        Example usage::

            additional_volumes:
                cache:
                    host: '{{ volume_path() }}/cache'
                    container: '/foo/bar/cache'

        Example result::

            additional_volumes:
                cache:
                    host: '/home/peter/my_projects/project1/_riptide/data/service_name/cache'
                    container: '/foo/bar/cache'
        """
        path = os.path.join(get_project_meta_folder(self.get_project().folder()), 'data', self.internal_get("$name"))
        return path

    @variable_helper
    def get_working_directory(self) -> str:
        """
        Returns the path to the working directory of the service **inside** the container.

        .. warning:: Does not work as expected for services started via "start-fg".

        Example usage::

            something: '{{ get_working_directory() }}'

        Example result::

            something: '/src/working_dir'
        """
        # Base directory: the mounted source tree if this service has the 'src' role
        workdir = None if "src" not in self.internal_get("roles") else CONTAINER_SRC_PATH
        if self.internal_contains("working_directory"):
            if PurePosixPath(self.internal_get("working_directory")).is_absolute():
                return self.internal_get("working_directory")
            elif workdir is not None:
                return str(PurePosixPath(workdir).joinpath(self.internal_get("working_directory")))
        return workdir

    @variable_helper
    def domain(self) -> str:
        """
        Returns the full domain name that this service should be available under,
        without protocol. This is the same domain as used for the proxy server.

        Example usage::

            something: 'https://{{ domain() }}'

        Example result::

            something: 'https://project--service.riptide.local'
        """
        # The 'main' service gets the short project domain; all others get
        # <project>--<service>.<proxy url>
        if "main" in self.internal_get("roles"):
            return self.get_project().internal_get("name") + "." \
                + self.parent_doc.parent_doc.parent_doc.internal_get("proxy")["url"]
        return self.get_project().internal_get("name") + DOMAIN_PROJECT_SERVICE_SEP \
            + self.internal_get("$name") + "." \
            + self.parent_doc.parent_doc.parent_doc.internal_get("proxy")["url"]

    @variable_helper
    def additional_domains(self) -> Dict[str, str]:
        """
        Takes additional_subdomains and returns subdomain/full domain name mappings
        that this service should be available under in addition to the main domain.
        These are the same domains as used for the proxy server.

        Example usage::

            something:
            {% for subdomain, additional_domain in additional_domains().items() %}
                {{ subdomain }}: {{ additional_domain }}
            {% endfor %}

        Example result::

            something:
                first: 'https://first.project--service.riptide.local'
                second: 'https://second.project--service.riptide.local'
        """
        if "main" in self.internal_get("roles"):
            return {subdomain: f'{subdomain}.{self.get_project().internal_get("name")}.{self.parent_doc.parent_doc.parent_doc.internal_get("proxy")["url"]}'
                    for subdomain in self.internal_get("additional_subdomains")}
        return {subdomain: f'{subdomain}.{self.get_project().internal_get("name")}{DOMAIN_PROJECT_SERVICE_SEP}{self.internal_get("$name")}.{self.parent_doc.parent_doc.parent_doc.internal_get("proxy")["url"]}'
                for subdomain in self.internal_get("additional_subdomains")}
/riptide_lib-0.8.0b1-py3-none-any.whl/riptide/config/document/service.py
0.666497
0.1929
service.py
pypi
from collections import OrderedDict
import os
from pathlib import PurePosixPath

from dotenv import dotenv_values
from schema import Schema, Optional, Or
from typing import TYPE_CHECKING, Union

from configcrunch import variable_helper
from riptide.config.document.common_service_command import ContainerDefinitionYamlConfigDocument
from riptide.config.files import get_project_meta_folder, CONTAINER_SRC_PATH
from riptide.config.service.config_files import process_config
from riptide.config.service.volumes import process_additional_volumes
from riptide.lib.cross_platform import cppath

if TYPE_CHECKING:
    from riptide.config.document.project import Project
    from riptide.config.document.app import App

# Top-level YAML key that identifies a command document.
HEADER = 'command'
# Schema key whose presence marks a command as an "in service" command
# (run inside an already-defined service container instead of its own).
KEY_IDENTIFIER_IN_SERVICE_COMMAND = 'in_service_with_role'


class Command(ContainerDefinitionYamlConfigDocument):
    """
    A command document. Specifies a CLI command to be executable by the user.

    Placed inside an :class:`riptide.config.document.app.App`.
    """

    @classmethod
    def header(cls) -> str:
        return HEADER

    @classmethod
    def schema(cls) -> Schema:
        """
        Can be either a normal command, a command in a service, or an alias command.
        """
        return Schema(
            Or(cls.schema_alias(), cls.schema_normal(), cls.schema_in_service())
        )

    @classmethod
    def schema_normal(cls):
        """
        Normal commands are executed in separate containers, that are running in the same
        container network as the services.

        [$name]: str
            Name as specified in the key of the parent app.

            Added by system. DO NOT specify this yourself in the YAML files.

        image: str
            Docker Image to use

        [command]: str
            Command to run inside of the container. Default's to command defined in image.

            .. warning:: Avoid quotes (", ') inside of the command, as those may lead to strange side effects.

        [additional_volumes]
            Additional volumes to mount into the container for this command.

            {key}
                host: str
                    Path on the host system to the volume. Avoid hardcoded absolute paths.
                container: str
                    Path inside the container (relative to src of Project or absolute).
                [mode]: str
                    Whether to mount the volume read-write ("rw", default) or read-only ("ro").
                [type]: str
                    Whether this volume is a "directory" (default) or a "file". Only checked if the
                    file/dir does not exist yet on the host system. Riptide will then create it
                    with the appropriate type.
                [volume_name]: str
                    Name of a named volume for this additional volume. Used instead of "host"
                    if present and the dont_sync_named_volumes_with_host performance setting
                    is enabled. Volumes with the same volume_name have the same content, even
                    across projects. As a constraint, the name of two volumes should only be
                    the same, if the host path specified is also the same, to ensure the same
                    behaviour regardless of if the performance setting is enabled.

        [environment]
            Additional environment variables

            {key}: str
                Key is the name of the variable, value is the value.

        [config_from_roles]: List[str]
            List of role names. All files defined under "config" for services matching the
            roles are mounted into the command container.

        [read_env_file]: bool
            If enabled, read the environment variables in the env-files defined in the
            project (``env_files``).
            Default: True

        [use_host_network]: bool
            If enabled, the container uses network mode `host`. Overrides network and port settings
            Default: False

        **Example Document:**

        .. code-block:: yaml

            command:
              image: riptidepy/php
              command: 'php index.php'
        """
        return Schema({
            Optional('$ref'): str,  # reference to other Command documents
            Optional('$name'): str,  # Added by system during processing parent app.
            'image': str,
            Optional('command'): str,
            Optional('additional_volumes'): {
                str: {
                    'host': str,
                    'container': str,
                    Optional('mode'): str,  # default: rw - can be rw/ro.
                    Optional('type'): Or('directory', 'file'),  # default: directory
                    Optional('volume_name'): str
                }
            },
            Optional('environment'): {str: str},
            Optional('config_from_roles'): [str],
            Optional('read_env_file'): bool,
            Optional('use_host_network'): bool,
        })

    @classmethod
    def schema_in_service(cls):
        """
        Command is run in a running service container.

        If the service container is not running, a new container is started based on the
        definition of the service.

        [$name]: str
            Name as specified in the key of the parent app.

            Added by system. DO NOT specify this yourself in the YAML files.

        in_service_with_role: str
            Runs the command in the first service which has this role. May lead to
            unexpected results, if multiple services match the role.

        command: str
            Command to run inside of the container.

            .. warning:: Avoid quotes (", ') inside of the command, as those may lead to strange side effects.

        [environment]
            Additional environment variables. The container also has access to the
            environment of the service. Variables in the current user's env will override
            those values and variables defined here, will override all other.

            {key}: str
                Key is the name of the variable, value is the value.

        [read_env_file]: bool
            If enabled, read the environment variables in the env-files defined in the
            project (``env_files``).
            Default: True

        [use_host_network]: bool
            If enabled, the container uses network mode `host`. Overrides network and port settings
            Default: False

        **Example Document:**

        .. code-block:: yaml

            command:
              in_service_with_role: php
              command: 'php index.php'
        """
        return Schema({
            Optional('$ref'): str,  # reference to other Command documents
            Optional('$name'): str,  # Added by system during processing parent app.
            KEY_IDENTIFIER_IN_SERVICE_COMMAND: str,
            'command': str,
            Optional('environment'): {str: str},
            Optional('read_env_file'): bool,
            Optional('use_host_network'): bool,
        })

    @classmethod
    def schema_alias(cls):
        """
        Aliases another command.

        [$name]: str
            Name as specified in the key of the parent app.

            Added by system. DO NOT specify this yourself in the YAML files.

        aliases: str
            Name of the command that is aliased by this command.
        """
        return Schema({
            Optional('$ref'): str,  # reference to other Command documents
            Optional('$name'): str,  # Added by system during processing parent app.
            'aliases': str
        })

    def _initialize_data_after_variables(self, data: dict) -> dict:
        """
        Normalize all host-paths to only use the system-type directory separator,
        and default ``read_env_file`` to True when it was not set.
        """
        if "additional_volumes" in data:
            for obj in data["additional_volumes"].values():
                obj["host"] = cppath.normalize(obj["host"])
        # NOTE(review): the containment check is done on ``self`` while the default is
        # written to ``data`` — presumably equivalent at this stage of the configcrunch
        # document lifecycle; confirm it was not meant to be ``"read_env_file" not in data``.
        if "read_env_file" not in self:
            data["read_env_file"] = True
        return data

    def get_project(self) -> 'Project':
        """
        Returns the project or raises an error if this is not assigned to a project

        :raises: IndexError: If not assigned to a project
        """
        try:
            # Commands are nested App -> Project; two parent hops reach the project.
            return self.parent_doc.parent_doc
        except Exception as ex:
            raise IndexError("Expected command to have a project assigned") from ex

    def collect_volumes(self) -> OrderedDict:
        """
        Collect volume mappings that this command should be getting when running.
        Only applicable to commands matching the "normal" schema.

        Volumes are built from following sources:

        * Source code is mounted as volume if role "src" is set
        * SSH_AUTH_SOCKET path is added as a volume
        * additional_volumes are added.
        * All config files from all services matching the roles in 'config_from_roles'
          are added. No service is processed twice. Order is arbitrary, with the
          exception that roles are processed in the order they are defined in.

        :return: dict. Return format is the docker container API volumes dict format.
                 See: https://docker-py.readthedocs.io/en/stable/containers.html#docker.models.containers.ContainerCollection.run
                 The volume definitions may contain an additional key 'name', which should
                 be used by the engine, instead of the host path if the
                 dont_sync_named_volumes_with_host performance option is enabled.
        """
        project = self.get_project()
        volumes = OrderedDict({})

        # source code
        volumes[project.src_folder()] = {'bind': CONTAINER_SRC_PATH, 'mode': 'rw'}

        # If SSH_AUTH_SOCK is set, provide the ssh auth socket as a volume
        if 'SSH_AUTH_SOCK' in os.environ:
            volumes[os.environ['SSH_AUTH_SOCK']] = {'bind': os.environ['SSH_AUTH_SOCK'], 'mode': 'rw'}

        # additional_volumes
        if "additional_volumes" in self:
            # Shared with services logic
            volumes.update(process_additional_volumes(list(self['additional_volumes'].values()), project.folder()))

        # config_from_role
        if "config_from_roles" in self:
            # Track processed services so a service matching multiple roles is mounted once.
            services_already_checked = []
            for role in self["config_from_roles"]:
                for service in self.parent().get_services_by_role(role):
                    if "config" in service and service not in services_already_checked:
                        services_already_checked.append(service)
                        for config_name, config in service["config"].items():
                            force_recreate = False
                            if "force_recreate" in service["config"][config_name] and service["config"][config_name]["force_recreate"]:
                                force_recreate = True
                            # Config files are always bound below /src inside the container.
                            bind_path = str(PurePosixPath('/src/').joinpath(PurePosixPath(config["to"])))
                            process_config(volumes, config_name, config, service, bind_path, regenerate=force_recreate)

        return volumes

    def resolve_alias(self) -> 'Command':
        """
        If this is not an alias, returns self. Otherwise returns command that is
        aliased by this (recursively).
        """
        if "aliases" in self:
            return self.parent()["commands"][self["aliases"]].resolve_alias()
        return self

    def collect_environment(self) -> dict:
        """
        Collect environment variables.

        The passed environment is simple all of the riptide's process environment,
        minus some important meta-variables such as USERNAME and PATH.

        Also collects all environment variables defined in command and sets LINES and
        COLUMNS based on terminal size.

        Additionally, all configurations in the ``.env`` file in the project folder are
        also passed to the container (if ``read_env_file``) is True).

        Environment priority:

        - Current shell environment variables.
        - Environment variables defined in the ``environment`` of the command.
        - Environment variables of the ``.env`` file.
        - LINES and COLUMNS from current terminal size.

        :return: dict. Returned format is ``{key1: value1, key2: value2}``.
        """
        env = os.environ.copy()
        # Strip host-specific meta variables that should not leak into the container.
        keys_to_remove = {"PATH", "PS1", "USERNAME", "PWD", "SHELL", "HOME", "TMPDIR"}.intersection(set(env.keys()))
        for key in keys_to_remove:
            del env[key]
        if "environment" in self:
            for key, value in self['environment'].items():
                env[key] = value

        # read_env_file defaults to True when not present (see _initialize_data_after_variables).
        if "read_env_file" not in self or self["read_env_file"]:
            for env_file_path in self.get_project()['env_files']:
                env.update(dotenv_values(os.path.join(self.get_project().folder(), env_file_path)))

        # Best-effort: no terminal attached (e.g. CI) raises OSError and is ignored.
        try:
            cols, lines = os.get_terminal_size()
            env['COLUMNS'] = str(cols)
            env['LINES'] = str(lines)
        except OSError:
            pass

        return env

    def get_service(self, app: 'App') -> Union[str, None]:
        """
        Only applicable to "in service" commands.
        Returns the name of the service in app.

        :param app: The app to search in
        :return: Name of the service (key) in app.
        :raises TypeError: If this is not an "in service" command.
        :raises ValueError: If the app has no services or no service matches the role.

        .. NOTE(review): despite the ``Union[str, None]`` annotation, this method never
           actually returns None — failures raise instead. Confirm before tightening
           the annotation to ``str``.
        """
        if KEY_IDENTIFIER_IN_SERVICE_COMMAND not in self.doc:
            raise TypeError('get_service can only be used on "in service" commands.')
        if 'services' not in app:
            raise ValueError(
                f"Command {(self['$name'] if '$name' in self else '???')} can not run in service with role "
                f"{self.doc[KEY_IDENTIFIER_IN_SERVICE_COMMAND]}: "
                f"The app has no services.")
        for service_name, service in app['services'].items():
            if 'roles' in service and self.doc[KEY_IDENTIFIER_IN_SERVICE_COMMAND] in service['roles']:
                return service_name
        raise ValueError(f"Command {(self['$name'] if '$name' in self else '???')} can not run in service with role "
                         f"{self.doc[KEY_IDENTIFIER_IN_SERVICE_COMMAND]}: "
                         f"No service with this role found in the app.")

    def error_str(self) -> str:
        # Short identifier used in configcrunch error messages.
        return f"{self.__class__.__name__}<{(self.internal_get('$name') if self.internal_contains('$name') else '???')}>"

    @variable_helper
    def parent(self) -> 'App':
        """
        Returns the app that this command belongs to.

        Example usage::

            something: '{{ parent().notices.usage }}'

        Example result::

            something: 'This is easy to use.'
        """
        # noinspection PyTypeChecker
        return super().parent()

    @variable_helper
    def volume_path(self) -> str:
        """
        Returns the (host) path to a command-unique directory for storing container data.

        Example usage::

            additional_volumes:
              command_cache:
                host: '{{ volume_path() }}/command_cache'
                container: '/foo/bar/cache'

        Example result::

            additional_volumes:
              command_cache:
                host: '/home/peter/my_projects/project1/_riptide/cmd_data/command_name/command_cache'
                container: '/foo/bar/cache'
        """
        path = os.path.join(get_project_meta_folder(self.get_project().folder()), 'cmd_data', self.internal_get("$name"))
        # Unlike the Service equivalent, the directory is created eagerly here.
        os.makedirs(path, exist_ok=True)
        return path
/riptide_lib-0.8.0b1-py3-none-any.whl/riptide/config/document/command.py
0.835383
0.192824
command.py
pypi
from schema import Optional, Schema, Or
from typing import List, Union, TYPE_CHECKING, Tuple, Type

# NOTE(review): ConfigcrunchError and REMOVE appear unused in the visible code; kept
# because they may be part of this module's re-export surface — confirm before removing.
from configcrunch import YamlConfigDocument, DocReference, ConfigcrunchError, REMOVE
from configcrunch import variable_helper
from riptide.config.document.command import Command
from riptide.config.document.service import Service

if TYPE_CHECKING:
    from riptide.config.document.project import Project

# Top-level YAML key that identifies an app document.
HEADER = 'app'


class App(YamlConfigDocument):
    """
    An application.

    Consists of (multiple) :class:`riptide.config.document.service.Service` and
    (multiple) :class:`riptide.config.document.command.Command` and is usually
    included in a :class:`riptide.config.document.project.Project`.
    """

    @classmethod
    def header(cls) -> str:
        return HEADER

    @classmethod
    def schema(cls) -> Schema:
        """
        name: str
            Name describing this app.

        [notices]
            [usage]: str
                Text that will be shown when the interactive
                `setup wizard </user_docs/4_project.html>`_ ist started.
                This text should describe additional steps needed to finish the setup
                of the app and general usage notes.
            [installation]: str
                Text that will be shown, when the user selects a new installation
                (from scratch) for this app. This text should explain how to execute
                the first-time-setup of this app when using Riptide.

        [import]
            {key}
                Files and directories to import during the interactive setup wizard.

                target: str
                    Target path that the file or directory should be imported to,
                    relative to the directory of the riptide.yml
                name: str
                    Human-readable name of this import file. This is displayed during
                    the interactive setup and should explain what kind of file or
                    directory is imported.

        [services]
            {key}: :class:`~riptide.config.document.service.Service`
                Services for this app.

        [commands]
            {key}: :class:`~riptide.config.document.command.Command`
                Commands for this app.

        [unimportant_paths]: List[str]
            Normally all files inside containers are shared with the host (for commands
            and services with role 'src'). This list specifies files that don't need to
            be synced with the host. This means, that these files will only be uploaded
            to the container on start and changes will not be visible on the host.
            Changes that are made on the host file system may also not be visible inside
            the container. This increases performance on non-native platforms (Mac and
            Windows).

            This feature is only enabled if the system configuration performance setting
            ``dont_sync_unimportant_src`` is enabled. If the feature is disabled, all
            files are shared with the host. See the documentation for that setting for
            more information.

            All paths are relative to the src of the project. Only directories are
            supported.

        **Example Document:**

        .. code-block:: yaml

            app:
              name: example
              notices:
                usage: Hello World!
              import:
                example:
                  target: path/inside/project
                  name: Example Files
              services:
                example:
                  $ref: /service/example
              commands:
                example:
                  $ref: /command/example
        """
        return Schema(
            {
                Optional('$ref'): str,  # reference to other App documents
                'name': str,
                Optional('notices'): {
                    Optional('usage'): str,
                    Optional('installation'): str
                },
                Optional('import'): {
                    str: {
                        'target': str,
                        'name': str
                    }
                },
                Optional('services'): {
                    str: DocReference(Service)
                },
                Optional('commands'): {
                    str: DocReference(Command)
                },
                Optional('unimportant_paths'): [str]
            }
        )

    @classmethod
    def subdocuments(cls) -> List[Tuple[str, Type[YamlConfigDocument]]]:
        # Entries under "services" resolve to Service documents,
        # entries under "commands" to Command documents.
        return [
            ("services[]", Service),
            ("commands[]", Command),
        ]

    def validate(self):
        """
        Initialise the optional services and command dicts.

        Has to be done after validate because of some issues with Schema validation
        error handling :(
        """
        ret_val = super().validate()
        # Only fill in defaults when the document actually validated.
        if ret_val:
            if not self.internal_contains("services"):
                self.internal_set("services", {})
            if not self.internal_contains("commands"):
                self.internal_set("commands", {})
        return ret_val

    def error_str(self) -> str:
        # Short identifier used in configcrunch error messages.
        return f"{self.__class__.__name__}<{(self.internal_get('name') if self.internal_contains('name') else '???')}>"

    @variable_helper
    def parent(self) -> 'Project':
        """
        Returns the project that this app belongs to.

        Example usage::

            something: '{{ parent().src }}'

        Example result::

            something: '.'
        """
        # noinspection PyTypeChecker
        return super().parent()

    @variable_helper
    def get_service_by_role(self, role_name: str) -> Union[Service, None]:
        """
        Returns the first service with the given role name.

        Example usage::

            something: '{{ get_service_by_role("main")["$name"] }}'

        Example result::

            something: 'service1'

        :param role_name: Role to search for
        :raises ValueError: If no service has the given role.

        .. NOTE(review): despite the ``Union[Service, None]`` annotation, this method
           never returns None — it raises ValueError instead. Confirm intended contract
           before changing either the annotation or the behavior.
        """
        for service in self.internal_get("services").values():
            if service.internal_contains("roles") and role_name in service.internal_get("roles"):
                return service
        raise ValueError(f"No service with role {role_name} found in the app.")

    @variable_helper
    def get_services_by_role(self, role_name: str) -> List[Service]:
        """
        Returns all services with the given role name.

        :param role_name: Role to search for
        :return: Possibly empty list of matching services.
        """
        services = []
        for service in self.internal_get("services").values():
            if service.internal_contains("roles") and role_name in service.internal_get("roles"):
                services.append(service)
        return services
/riptide_lib-0.8.0b1-py3-none-any.whl/riptide/config/document/app.py
0.888958
0.245108
app.py
pypi